diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 05e0840c..3136ceae 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: c3693e4872a0785b2ed46c59a8464804 + docChecksum: 3135f1ce6dd57e0487ee2840362ced1a docVersion: 1.0.0 speakeasyVersion: 1.606.10 generationVersion: 2.687.13 - releaseVersion: 1.9.11 - configChecksum: d84e605ef7a3265972f6695049243759 + releaseVersion: 1.10.0 + configChecksum: 1446aab5f184e7184590fe5756b556a8 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -58,12 +58,14 @@ generatedFiles: - docs/models/agenthandoffstartedevent.md - docs/models/agenthandoffstartedeventtype.md - docs/models/agentobject.md + - docs/models/agentsapiv1agentsdeleterequest.md - docs/models/agentsapiv1agentsgetrequest.md - docs/models/agentsapiv1agentslistrequest.md - docs/models/agentsapiv1agentsupdaterequest.md - docs/models/agentsapiv1agentsupdateversionrequest.md - docs/models/agentsapiv1conversationsappendrequest.md - docs/models/agentsapiv1conversationsappendstreamrequest.md + - docs/models/agentsapiv1conversationsdeleterequest.md - docs/models/agentsapiv1conversationsgetrequest.md - docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md - docs/models/agentsapiv1conversationshistoryrequest.md @@ -89,6 +91,7 @@ generatedFiles: - docs/models/assistantmessage.md - docs/models/assistantmessagecontent.md - docs/models/assistantmessagerole.md + - docs/models/attributes.md - docs/models/audiochunk.md - docs/models/audiochunktype.md - docs/models/audiotranscriptionrequest.md @@ -220,6 +223,7 @@ generatedFiles: - docs/models/fimcompletionstreamrequeststop.md - docs/models/finetuneablemodeltype.md - docs/models/finishreason.md + - docs/models/format_.md - 
docs/models/ftclassifierlossfunction.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md @@ -336,12 +340,14 @@ generatedFiles: - docs/models/modeltype.md - docs/models/moderationobject.md - docs/models/moderationresponse.md + - docs/models/name.md - docs/models/object.md - docs/models/ocrimageobject.md - docs/models/ocrpagedimensions.md - docs/models/ocrpageobject.md - docs/models/ocrrequest.md - docs/models/ocrresponse.md + - docs/models/ocrtableobject.md - docs/models/ocrusageinfo.md - docs/models/one.md - docs/models/outputcontentchunks.md @@ -353,6 +359,7 @@ generatedFiles: - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/repositories.md + - docs/models/requestsource.md - docs/models/response1.md - docs/models/responsebody.md - docs/models/responsedoneevent.md @@ -380,6 +387,7 @@ generatedFiles: - docs/models/systemmessage.md - docs/models/systemmessagecontent.md - docs/models/systemmessagecontentchunks.md + - docs/models/tableformat.md - docs/models/textchunk.md - docs/models/textchunktype.md - docs/models/thinkchunk.md @@ -391,13 +399,16 @@ generatedFiles: - docs/models/toolchoice.md - docs/models/toolchoiceenum.md - docs/models/toolexecutiondeltaevent.md + - docs/models/toolexecutiondeltaeventname.md - docs/models/toolexecutiondeltaeventtype.md - docs/models/toolexecutiondoneevent.md + - docs/models/toolexecutiondoneeventname.md - docs/models/toolexecutiondoneeventtype.md - docs/models/toolexecutionentry.md - docs/models/toolexecutionentryobject.md - docs/models/toolexecutionentrytype.md - docs/models/toolexecutionstartedevent.md + - docs/models/toolexecutionstartedeventname.md - docs/models/toolexecutionstartedeventtype.md - docs/models/toolfilechunk.md - docs/models/toolfilechunktype.md @@ -498,12 +509,14 @@ generatedFiles: - src/mistralai/models/agenthandoffdoneevent.py - src/mistralai/models/agenthandoffentry.py - src/mistralai/models/agenthandoffstartedevent.py + - 
src/mistralai/models/agents_api_v1_agents_deleteop.py - src/mistralai/models/agents_api_v1_agents_getop.py - src/mistralai/models/agents_api_v1_agents_listop.py - src/mistralai/models/agents_api_v1_agents_update_versionop.py - src/mistralai/models/agents_api_v1_agents_updateop.py - src/mistralai/models/agents_api_v1_conversations_append_streamop.py - src/mistralai/models/agents_api_v1_conversations_appendop.py + - src/mistralai/models/agents_api_v1_conversations_deleteop.py - src/mistralai/models/agents_api_v1_conversations_getop.py - src/mistralai/models/agents_api_v1_conversations_historyop.py - src/mistralai/models/agents_api_v1_conversations_listop.py @@ -677,12 +690,14 @@ generatedFiles: - src/mistralai/models/ocrpageobject.py - src/mistralai/models/ocrrequest.py - src/mistralai/models/ocrresponse.py + - src/mistralai/models/ocrtableobject.py - src/mistralai/models/ocrusageinfo.py - src/mistralai/models/outputcontentchunks.py - src/mistralai/models/paginationinfo.py - src/mistralai/models/prediction.py - src/mistralai/models/processingstatusout.py - src/mistralai/models/referencechunk.py + - src/mistralai/models/requestsource.py - src/mistralai/models/responsedoneevent.py - src/mistralai/models/responseerrorevent.py - src/mistralai/models/responseformat.py @@ -769,6 +784,10 @@ examples: application/json: {"object": "list"} "422": application/json: {} + userExample: + responses: + "200": + application/json: {"object": "list"} retrieve_model_v1_models__model_id__get: speakeasy-default-retrieve-model-v1-models-model-id-get: parameters: @@ -776,9 +795,16 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false, "classification": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", 
"root": "", "archived": false} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "function_calling": true, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} "422": application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} delete_model_v1_models__model_id__delete: speakeasy-default-delete-model-v1-models-model-id-delete: parameters: @@ -789,6 +815,13 @@ examples: application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} "422": application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} jobs_api_routes_fine_tuning_update_fine_tuned_model: speakeasy-default-jobs-api-routes-fine-tuning-update-fine-tuned-model: parameters: @@ -818,7 +851,7 @@ examples: agents_api_v1_conversations_start: speakeasy-default-agents-api-v1-conversations-start: requestBody: - application/json: {"inputs": "", "stream": false} + application/json: {"inputs": "", "stream": false, "completion_args": {"response_format": {"type": "text"}}} responses: "200": application/json: {"object": 
"conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} @@ -851,7 +884,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": [], "stream": false, "store": true, "handoff_execution": "server"} + application/json: {"inputs": [], "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} responses: "200": application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} @@ -883,7 +916,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "from_entry_id": ""} + application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} responses: "200": application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} @@ -892,7 +925,7 @@ examples: agents_api_v1_conversations_start_stream: speakeasy-default-agents-api-v1-conversations-start-stream: requestBody: - application/json: {"inputs": [{"object": "entry", "type": "function.result", "tool_call_id": "", "result": ""}], "stream": true} + application/json: {"inputs": [{"object": "entry", "type": "function.result", "tool_call_id": "", "result": ""}], "stream": true, "completion_args": {"response_format": {"type": "text"}}} responses: "422": application/json: {} @@ -902,7 +935,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": 
"server"} + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} responses: "422": application/json: {} @@ -912,17 +945,17 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": [{"object": "entry", "type": "message.input", "role": "assistant", "content": "", "prefix": false}], "stream": true, "store": true, "handoff_execution": "server", "from_entry_id": ""} + application/json: {"inputs": [{"object": "entry", "type": "message.input", "role": "assistant", "content": "", "prefix": false}], "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} responses: "422": application/json: {} agents_api_v1_agents_create: speakeasy-default-agents-api-v1-agents-create: requestBody: - application/json: {"model": "LeBaron", "name": ""} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "LeBaron", "name": ""} responses: "200": - application/json: {"model": "Ranchero", "name": "", "object": "agent", "id": "", "version": 316961, "created_at": "2025-03-26T19:00:51.430Z", "updated_at": "2023-04-28T15:08:02.110Z"} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Ranchero", "name": "", "object": "agent", "id": "", "version": 316961, "versions": [818563, 316961], "created_at": "2025-03-26T19:00:51.430Z", "updated_at": "2023-04-28T15:08:02.110Z", "deployment_chat": false, "source": ""} "422": application/json: {} agents_api_v1_agents_list: @@ -933,7 +966,7 @@ examples: page_size: 20 responses: "200": - application/json: [{"model": "Impala", "name": "", "object": "agent", "id": "", "version": 43153, "created_at": "2024-04-26T15:54:09.954Z", "updated_at": "2024-02-11T18:27:55.607Z"}] + application/json: [{"model": "Impala", "name": "", "object": "agent", "id": "", "version": 43153, "versions": [43153, 
439473], "created_at": "2024-04-26T15:54:09.954Z", "updated_at": "2024-02-11T18:27:55.607Z", "deployment_chat": true, "source": ""}] "422": application/json: {} agents_api_v1_agents_get: @@ -943,7 +976,7 @@ examples: agent_id: "" responses: "200": - application/json: {"model": "Silverado", "name": "", "object": "agent", "id": "", "version": 845972, "created_at": "2025-08-21T03:10:48.135Z", "updated_at": "2024-11-11T17:15:57.309Z"} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Silverado", "name": "", "object": "agent", "id": "", "version": 845972, "versions": [845972, 878771, 621094], "created_at": "2025-08-21T03:10:48.135Z", "updated_at": "2024-11-11T17:15:57.309Z", "deployment_chat": false, "source": ""} "422": application/json: {} agents_api_v1_agents_update: @@ -952,10 +985,10 @@ examples: path: agent_id: "" requestBody: - application/json: {} + application/json: {"completion_args": {"response_format": {"type": "text"}}} responses: "200": - application/json: {"model": "Model X", "name": "", "object": "agent", "id": "", "version": 799821, "created_at": "2025-10-20T17:35:08.067Z", "updated_at": "2023-11-16T08:47:13.265Z"} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Model X", "name": "", "object": "agent", "id": "", "version": 799821, "versions": [799821, 934063], "created_at": "2025-10-20T17:35:08.067Z", "updated_at": "2023-11-16T08:47:13.265Z", "deployment_chat": true, "source": ""} "422": application/json: {} agents_api_v1_agents_update_version: @@ -967,7 +1000,7 @@ examples: version: 157995 responses: "200": - application/json: {"model": "XTS", "name": "", "object": "agent", "id": "", "version": 310764, "created_at": "2023-05-08T23:29:06.216Z", "updated_at": "2023-05-16T19:20:05.735Z"} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "XTS", "name": "", "object": "agent", "id": "", "version": 310764, "versions": [], "created_at": 
"2023-05-08T23:29:06.216Z", "updated_at": "2023-05-16T19:20:05.735Z", "deployment_chat": false, "source": ""} "422": application/json: {} files_api_routes_upload_file: @@ -977,6 +1010,12 @@ examples: responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "batch_result", "source": "upload"} + userExample: + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341"} files_api_routes_list_files: speakeasy-default-files-api-routes-list-files: parameters: @@ -986,6 +1025,15 @@ examples: responses: "200": application/json: {"data": [{"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_error", "source": "upload"}], "object": "", "total": 999335} + userExample: + parameters: + query: + page: 0 + page_size: 100 + include_total: true + responses: + "200": + application/json: {"data": [{"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}, {"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}], "object": "list", "total": 2} files_api_routes_retrieve_file: speakeasy-default-files-api-routes-retrieve-file: 
parameters: @@ -994,6 +1042,13 @@ examples: responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "instruct", "source": "repository", "deleted": false} + userExample: + parameters: + path: + file_id: "f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341", "deleted": false} files_api_routes_delete_file: speakeasy-default-files-api-routes-delete-file: parameters: @@ -1002,6 +1057,13 @@ examples: responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "deleted": false} + userExample: + parameters: + path: + file_id: "3b6d45eb-e30b-416f-8019-f47e2e93d930" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "deleted": true} files_api_routes_download_file: speakeasy-default-files-api-routes-download-file: parameters: @@ -1020,6 +1082,15 @@ examples: responses: "200": application/json: {"url": "https://knotty-birdcage.net/"} + userExample: + parameters: + path: + file_id: "06a020ab-355c-49a6-b19d-304b7c01699f" + query: + expiry: 24 + responses: + "200": + application/json: {"url": "https://mistralaifilesapiprodswe.blob.core.windows.net/fine-tune/.../.../e85980c9409e4a46930436588f6292b0.jsonl?se=2025-10-04T14%3A16%3A17Z&sp=r&sv=2025-01-05&sr=b&sig=..."} jobs_api_routes_fine_tuning_get_fine_tuning_jobs: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-jobs: parameters: @@ -1074,7 +1145,7 @@ examples: 
jobs_api_routes_batch_create_batch_job: speakeasy-default-jobs-api-routes-batch-create-batch-job: requestBody: - application/json: {"input_files": ["fe3343a2-3b8d-404b-ba32-a78dede2614a"], "endpoint": "/v1/moderations", "timeout_hours": 24} + application/json: {"input_files": ["fe3343a2-3b8d-404b-ba32-a78dede2614a"], "endpoint": "/v1/moderations", "model": "mistral-small-latest", "timeout_hours": 24} responses: "200": application/json: {"id": "", "object": "batch", "input_files": ["7b2553d8-e17f-4df5-a862-a1678f6b5271", "8c618d9f-7d82-42ba-a284-d57d84f50a58", "c042f996-e842-441d-ae47-4e0850334e41"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 395527, "total_requests": 166919, "completed_requests": 258552, "succeeded_requests": 480980, "failed_requests": 684176} @@ -1097,7 +1168,7 @@ examples: chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} @@ -1106,7 +1177,7 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} responses: "422": application/json: {} @@ -1119,10 +1190,16 @@ examples: application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} + userExample: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). 
As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} stream_fim: speakeasy-default-stream-fim: requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-latest", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} responses: "422": application/json: {} @@ -1135,10 +1212,16 @@ examples: application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} + userExample: + requestBody: + application/json: {"stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}, "agent_id": ""} + responses: + "200": + application/json: {"id": "cf79f7daaee244b1a0ae5c7b1444424a", "object": "chat.completion", "model": "mistral-medium-latest", "usage": {"prompt_tokens": 24, "completion_tokens": 27, "total_tokens": 51, "prompt_audio_seconds": {}}, "created": 1759500534, "choices": [{"index": 0, "message": {"content": "Arrr, the scallywag Claude Monet be the finest French painter to ever splash colors on a canvas, savvy?", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} stream_agents: speakeasy-default-stream-agents: requestBody: - application/json: {"stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "agent_id": ""} + application/json: {"stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}, "agent_id": ""} responses: "422": application/json: {} @@ -1151,6 +1234,12 @@ examples: application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} "422": application/json: {} + userExample: + requestBody: + application/json: {"model": "mistral-embed", "input": ["Embed this sentence.", "As well as this one."]} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "list", "model": "mistral-embed", "usage": {"prompt_tokens": 15, "completion_tokens": 0, "total_tokens": 15, "prompt_audio_seconds": null}, "data": [{"object": "embedding", "embedding": [-0.016632080078125, 0.0701904296875, 0.03143310546875, 0.01309967041015625, 0.0202789306640625], "index": 0}, {"object": "embedding", "embedding": [-0.0230560302734375, 0.039337158203125, 0.0521240234375, -0.0184783935546875, 0.034271240234375], "index": 1}]} moderations_v1_moderations_post: speakeasy-default-moderations-v1-moderations-post: requestBody: @@ -1160,6 +1249,12 @@ examples: application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Corvette", "results": [{}]} "422": application/json: {} + userExample: + requestBody: + application/json: {"model": "mistral-moderation-latest", "input": ""} + responses: + "200": + application/json: {"id": "4d71ae510af942108ef7344f903e2b88", "model": "mistral-moderation-latest", "results": [{"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 
0.0011335690505802631, "hate_and_discrimination": 0.0030753696337342262, "violence_and_threats": 0.0003569706459529698, "dangerous_and_criminal_content": 0.002251847181469202, "selfharm": 0.00017952796770259738, "health": 0.0002780309587251395, "financial": 0.00008481103577651083, "law": 0.00004539786823443137, "pii": 0.0023967307060956955}}, {"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 0.000626334105618298, "hate_and_discrimination": 0.0013670255430042744, "violence_and_threats": 0.0002611903182696551, "dangerous_and_criminal_content": 0.0030753696337342262, "selfharm": 0.00010889690747717395, "health": 0.00015843621804378927, "financial": 0.000191104321856983, "law": 0.00004006369272246957, "pii": 0.0035936026833951473}}]} chat_moderations_v1_chat_moderations_post: speakeasy-default-chat-moderations-v1-chat-moderations-post: requestBody: @@ -1169,10 +1264,16 @@ examples: application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Explorer", "results": [{}]} "422": application/json: {} + userExample: + requestBody: + application/json: {"input": [{"content": "", "role": "tool"}], "model": "LeBaron"} + responses: + "200": + application/json: {"id": "352bce1a55814127a3b0bc4fb8f02a35", "model": "mistral-moderation-latest", "results": [{"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 0.0010322310263291001, "hate_and_discrimination": 0.001597845577634871, "violence_and_threats": 0.00020342698553577065, "dangerous_and_criminal_content": 0.0029810327105224133, "selfharm": 0.00017952796770259738, "health": 0.0002959570847451687, "financial": 
0.000079673009167891, "law": 0.00004539786823443137, "pii": 0.004198795650154352}}]} classifications_v1_classifications_post: speakeasy-default-classifications-v1-classifications-post: requestBody: - application/json: {"model": "Silverado", "input": [""]} + application/json: {"model": "mistral-moderation-latest", "input": [""]} responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "ATS", "results": [{}, {"key": {"scores": {"key": 2080.19}}}]} @@ -1196,6 +1297,12 @@ examples: application/json: {"pages": [{"index": 944919, "markdown": "", "images": [], "dimensions": {"dpi": 984283, "height": 453411, "width": 398292}}], "model": "Wrangler", "usage_info": {"pages_processed": 47064}} "422": application/json: {} + userExample: + requestBody: + application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + responses: + "200": + application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. 
ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). 
Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | 
(0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) 
| (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) 
| (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 
15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} libraries_list_v1: speakeasy-default-libraries-list-v1: responses: @@ -1371,7 +1478,7 @@ examples: path: library_id: "36de3a24-5b1c-4c8f-9d84-d5642205a976" requestBody: - application/json: {"org_id": "aadd9ae1-f285-4437-884a-091c77efa6fd", "level": "Viewer", "share_with_uuid": "0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", "share_with_type": "User"} + application/json: {"level": "Viewer", "share_with_uuid": "0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", "share_with_type": "User"} responses: "200": application/json: {"library_id": "45b3a5b2-8b81-4453-9130-ded7f1e5a366", "org_id": "0fa6e542-f04b-431e-a1be-76a9a92b0e68", "role": "", "share_with_type": "", "share_with_uuid": "cdbcc0c5-e577-4880-8ed3-f919421d4fc5"} @@ -1383,7 +1490,7 @@ examples: path: library_id: "709e3cad-9fb2-4f4e-bf88-143cf1808107" requestBody: - application/json: {"org_id": "0814a235-c2d0-4814-875a-4b85f93d3dc7", "share_with_uuid": "b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", "share_with_type": "User"} + application/json: 
{"share_with_uuid": "b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", "share_with_type": "User"} responses: "200": application/json: {"library_id": "7f9c6af4-e362-4cf1-9363-0409d51c2dfa", "org_id": "6b2cac3a-b29c-4d8f-bebb-0db06ec1bf97", "role": "", "share_with_type": "", "share_with_uuid": "618c78f1-41ca-45c3-8ef2-7d78898c7061"} @@ -1396,9 +1503,32 @@ examples: responses: "200": application/json: {"model": "Beetle", "text": "", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "language": ""} + userExample: + requestBody: + multipart/form-data: {"model": "voxtral-mini-latest", "stream": false} + responses: + "200": + application/json: {"model": "voxtral-mini-2507", "text": "This week, I traveled to Chicago to deliver my final farewell address to the nation, following in the tradition of presidents before me. It was an opportunity to say thank you. Whether we've seen eye to eye or rarely agreed at all, my conversations with you, the American people, in living rooms, in schools, at farms and on factory floors, at diners and on distant military outposts, All these conversations are what have kept me honest, kept me inspired, and kept me going. Every day, I learned from you. You made me a better President, and you made me a better man.\nOver the course of these eight years, I've seen the goodness, the resilience, and the hope of the American people. I've seen neighbors looking out for each other as we rescued our economy from the worst crisis of our lifetimes. I've hugged cancer survivors who finally know the security of affordable health care. I've seen communities like Joplin rebuild from disaster, and cities like Boston show the world that no terrorist will ever break the American spirit. I've seen the hopeful faces of young graduates and our newest military officers. I've mourned with grieving families searching for answers. And I found grace in a Charleston church. 
I've seen our scientists help a paralyzed man regain his sense of touch, and our wounded warriors walk again. I've seen our doctors and volunteers rebuild after earthquakes and stop pandemics in their tracks. I've learned from students who are building robots and curing diseases, and who will change the world in ways we can't even imagine. I've seen the youngest of children remind us of our obligations to care for our refugees, to work in peace, and above all, to look out for each other.\nThat's what's possible when we come together in the slow, hard, sometimes frustrating, but always vital work of self-government. But we can't take our democracy for granted. All of us, regardless of party, should throw ourselves into the work of citizenship. Not just when there is an election. Not just when our own narrow interest is at stake. But over the full span of a lifetime. If you're tired of arguing with strangers on the Internet, try to talk with one in real life. If something needs fixing, lace up your shoes and do some organizing. If you're disappointed by your elected officials, then grab a clipboard, get some signatures, and run for office yourself.\nOur success depends on our participation, regardless of which way the pendulum of power swings. It falls on each of us to be guardians of our democracy, to embrace the joyous task we've been given to continually try to improve this great nation of ours. Because for all our outward differences, we all share the same proud title – citizen.\nIt has been the honor of my life to serve you as President. Eight years later, I am even more optimistic about our country's promise. And I look forward to working along your side as a citizen for all my days that remain.\nThanks, everybody. God bless you. 
And God bless the United States of America.\n", "segments": [], "usage": {"prompt_tokens": 4, "completion_tokens": 635, "total_tokens": 3264, "prompt_audio_seconds": 203}, "language": "en"} audio_api_v1_transcriptions_post_stream: speakeasy-default-audio-api-v1-transcriptions-post-stream: requestBody: multipart/form-data: {"model": "Camry", "stream": true} + agents_api_v1_conversations_delete: + speakeasy-default-agents-api-v1-conversations-delete: + parameters: + path: + conversation_id: "" + responses: + "422": + application/json: {} + agents_api_v1_agents_delete: + speakeasy-default-agents-api-v1-agents-delete: + parameters: + path: + agent_id: "" + responses: + "422": + application/json: {} examplesVersion: 1.0.2 generatedTests: {} +releaseNotes: "## SDK Changes Detected:\n* `mistral.beta.libraries.create()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.get()`: `response` **Changed** **Breaking** :warning:\n* `mistral.models.list()`: \n * `response.data.[].[base].capabilities` **Changed**\n * `error.status[422]` **Removed** **Breaking** :warning:\n* `mistral.files.list()`: \n * `request.include_total` **Added**\n * `response.total` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.start()`: \n * `request` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.delete()`: \n * `request.org_id` **Changed**\n * `response.share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.update_or_create()`: \n * `request.org_id` **Changed**\n * `response.share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.append()`: \n * `request.inputs.[array].[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.list()`: 
`response.data.[].share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.restart()`: \n * `request` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.update()`: \n * `request.attributes` **Added**\n * `response` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.upload()`: `response` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.list()`: \n * `request.filters_attributes` **Added**\n * `response.data.[]` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.update()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.delete()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.get()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.get_history()`: `response.entries.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.list()`: `response.data.[].owner_id` **Changed** **Breaking** :warning:\n* `mistral.models.retrieve()`: `response.[base].capabilities` **Changed**\n* `mistral.agents.complete()`: `request.metadata` **Added**\n* `mistral.beta.agents.get()`: \n * `request.agent_version` **Added**\n * `response` **Changed**\n* `mistral.beta.agents.list()`: \n * `request` **Changed**\n * `response.[]` **Changed**\n* `mistral.beta.agents.update_version()`: `response` **Changed**\n* `mistral.beta.agents.delete()`: **Added**\n* `mistral.beta.conversations.list()`: \n * `request.metadata` **Added**\n * `response.[]` **Changed**\n* `mistral.beta.conversations.get()`: `response` **Changed**\n* `mistral.beta.agents.update()`: \n * `request` **Changed**\n * `response` **Changed**\n* `mistral.beta.conversations.delete()`: **Added**\n* `mistral.chat.complete()`: `request.metadata` **Added**\n* `mistral.fim.complete()`: `request.metadata` **Added**\n* 
`mistral.beta.agents.create()`: \n * `request.metadata` **Added**\n * `response` **Changed**\n* `mistral.ocr.process()`: \n * `request` **Changed**\n * `response.pages.[]` **Changed**\n" diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 116b0e26..f206b927 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.11 + version: 1.10.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index f4582991..48c4bf7b 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:eefc1f0b6a5e9ec673d317d61cad766290710b5fc369412491b75f732cccfedd - sourceBlobDigest: sha256:97767522559603de92a9738938e522cea4d558b2a854500acf6fe8d81f8ccfb8 + sourceRevisionDigest: sha256:cb63bd997cefe7b3b36e91a475df57cb779bf79f183340e0713d8ffb16a2dabc + sourceBlobDigest: sha256:f0caa06fb9bcadc35b097aa5ff69bb5020937652df311722b5e44a282bd95d6d tags: - latest - - speakeasy-sdk-regen-1759420102 + - speakeasy-sdk-regen-1765914268 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:eefc1f0b6a5e9ec673d317d61cad766290710b5fc369412491b75f732cccfedd - sourceBlobDigest: sha256:97767522559603de92a9738938e522cea4d558b2a854500acf6fe8d81f8ccfb8 + sourceRevisionDigest: sha256:cb63bd997cefe7b3b36e91a475df57cb779bf79f183340e0713d8ffb16a2dabc + sourceBlobDigest: sha256:f0caa06fb9bcadc35b097aa5ff69bb5020937652df311722b5e44a282bd95d6d codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:8ed158c9c1ed8252f86b620219dd93e9650b45e7c6403cda7fdd9b4ee0d17dac + codeSamplesRevisionDigest: sha256:b1eacff97275a14ab0c2143e07bdfa4f4bd58f5370b2f106bcc6ada92b754d08 
workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.606.10 diff --git a/README.md b/README.md index 65b0f7b0..1bc889c6 100644 --- a/README.md +++ b/README.md @@ -145,12 +145,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.complete(model="mistral-small-latest", messages=[ + res = mistral.chat.complete(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -171,12 +173,14 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.chat.complete_async(model="mistral-small-latest", messages=[ + res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -252,7 +256,9 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -278,7 +284,9 @@ async def main(): "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -448,7 +456,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA #### [audio.transcriptions](docs/sdks/transcriptions/README.md) * [complete](docs/sdks/transcriptions/README.md#complete) - Create Transcription -* [stream](docs/sdks/transcriptions/README.md#stream) - Create streaming transcription (SSE) +* [stream](docs/sdks/transcriptions/README.md#stream) - Create Streaming Transcription (SSE) ### [batch](docs/sdks/batch/README.md) @@ -469,6 +477,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [list](docs/sdks/mistralagents/README.md#list) - List agent entities. * [get](docs/sdks/mistralagents/README.md#get) - Retrieve an agent entity. * [update](docs/sdks/mistralagents/README.md#update) - Update an agent entity. +* [delete](docs/sdks/mistralagents/README.md#delete) - Delete an agent entity. * [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. #### [beta.conversations](docs/sdks/conversations/README.md) @@ -476,6 +485,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [start](docs/sdks/conversations/README.md#start) - Create a conversation and append entries to it. * [list](docs/sdks/conversations/README.md#list) - List all created conversations. * [get](docs/sdks/conversations/README.md#get) - Retrieve a conversation information. +* [delete](docs/sdks/conversations/README.md#delete) - Delete a conversation. * [append](docs/sdks/conversations/README.md#append) - Append new entries to an existing conversation. * [get_history](docs/sdks/conversations/README.md#get_history) - Retrieve all entries in a conversation. * [get_messages](docs/sdks/conversations/README.md#get_messages) - Retrieve all messages in a conversation. 
@@ -500,7 +510,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA #### [beta.libraries.documents](docs/sdks/documents/README.md) -* [list](docs/sdks/documents/README.md#list) - List document in a given library. +* [list](docs/sdks/documents/README.md#list) - List documents in a given library. * [upload](docs/sdks/documents/README.md#upload) - Upload a new document. * [get](docs/sdks/documents/README.md#get) - Retrieve the metadata of a specific document. * [update](docs/sdks/documents/README.md#update) - Update the metadata of a specific document. @@ -597,7 +607,11 @@ with Mistral( "tool_call_id": "", "result": "", }, - ], stream=True) + ], stream=True, completion_args={ + "response_format": { + "type": "text", + }, + }) with res as event_stream: for event in event_stream: @@ -712,7 +726,7 @@ with Mistral( res = None try: - res = mistral.models.list() + res = mistral.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") # Handle response print(res) @@ -746,7 +760,7 @@ with Mistral( **Inherit from [`MistralError`](./src/mistralai/models/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 47 of 68 methods.* +* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 48 of 70 methods.* * [`ResponseValidationError`](./src/mistralai/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. diff --git a/RELEASES.md b/RELEASES.md index b65d9d0c..e43d3f33 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -328,4 +328,14 @@ Based on: ### Generated - [python v1.9.11] . ### Releases -- [PyPI v1.9.11] https://pypi.org/project/mistralai/1.9.11 - . 
\ No newline at end of file +- [PyPI v1.9.11] https://pypi.org/project/mistralai/1.9.11 - . + +## 2025-12-16 19:44:09 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.10.0] . +### Releases +- [PyPI v1.10.0] https://pypi.org/project/mistralai/1.10.0 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index b15a88aa..b230b016 100644 --- a/USAGE.md +++ b/USAGE.md @@ -13,12 +13,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.complete(model="mistral-small-latest", messages=[ + res = mistral.chat.complete(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -39,12 +41,14 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.chat.complete_async(model="mistral-small-latest", messages=[ + res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -120,7 +124,9 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -146,7 +152,9 @@ async def main(): "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) diff --git a/docs/models/agent.md b/docs/models/agent.md index 686fae75..ee054dd3 100644 --- a/docs/models/agent.md +++ b/docs/models/agent.md @@ -12,8 +12,12 @@ | `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `object` | [Optional[models.AgentObject]](../models/agentobject.md) | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `version` | *int* | :heavy_check_mark: | N/A | +| `versions` | List[*int*] | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | \ No newline at end of file +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `deployment_chat` | *bool* | :heavy_check_mark: | N/A | +| `source` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversation.md b/docs/models/agentconversation.md index 772cc80e..92fd673c 100644 --- a/docs/models/agentconversation.md +++ b/docs/models/agentconversation.md @@ -7,8 +7,10 @@ | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------- | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | | `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md index 34060d9a..afc27d3b 100644 --- a/docs/models/agentcreationrequest.md +++ b/docs/models/agentcreationrequest.md @@ -11,4 +11,5 @@ | `model` | *str* | :heavy_check_mark: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsdeleterequest.md b/docs/models/agentsapiv1agentsdeleterequest.md new file mode 100644 index 00000000..2799f418 --- /dev/null +++ b/docs/models/agentsapiv1agentsdeleterequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1AgentsDeleteRequest + + +## 
Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md index b46ac23d..825e03a0 100644 --- a/docs/models/agentsapiv1agentsgetrequest.md +++ b/docs/models/agentsapiv1agentsgetrequest.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistrequest.md b/docs/models/agentsapiv1agentslistrequest.md index b5bcee62..c4f05b5c 100644 --- a/docs/models/agentsapiv1agentslistrequest.md +++ b/docs/models/agentsapiv1agentslistrequest.md @@ -3,7 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | 
N/A | +| `sources` | List[[models.RequestSource](../models/requestsource.md)] | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsdeleterequest.md b/docs/models/agentsapiv1conversationsdeleterequest.md new file mode 100644 index 00000000..c6eed281 --- /dev/null +++ b/docs/models/agentsapiv1conversationsdeleterequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsDeleteRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. 
| \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationslistrequest.md b/docs/models/agentsapiv1conversationslistrequest.md index 528a055a..62c9011f 100644 --- a/docs/models/agentsapiv1conversationslistrequest.md +++ b/docs/models/agentsapiv1conversationslistrequest.md @@ -6,4 +6,5 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 73615ed9..2a0c4144 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -3,20 +3,21 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index b0aac6c1..b2ccd4e8 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -3,20 +3,21 @@ ## Fields -| Field | Type | Required | Description | Example | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
| | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file diff --git a/docs/models/agentupdaterequest.md b/docs/models/agentupdaterequest.md index 9da03d03..641d1e40 100644 --- a/docs/models/agentupdaterequest.md +++ b/docs/models/agentupdaterequest.md @@ -11,4 +11,6 @@ | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/attributes.md b/docs/models/attributes.md new file mode 100644 index 00000000..147708d9 --- /dev/null +++ b/docs/models/attributes.md @@ -0,0 +1,59 @@ +# Attributes + + +## Supported Types + +### `bool` + +```python +value: bool = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + +### `float` + +```python +value: float = /* values here */ +``` + +### `datetime` + +```python +value: datetime = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + +### `List[int]` + +```python +value: List[int] = /* values here */ +``` + +### `List[float]` + +```python +value: List[float] = /* values here */ +``` + +### `List[bool]` + +```python +value: List[bool] = /* values here */ +``` + diff --git a/docs/models/audiotranscriptionrequest.md b/docs/models/audiotranscriptionrequest.md index e876de18..f2e17dd3 100644 --- a/docs/models/audiotranscriptionrequest.md +++ b/docs/models/audiotranscriptionrequest.md @@ -3,13 +3,13 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | 
---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | -| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | -| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | -| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `stream` | *Optional[Literal[False]]* | :heavy_minus_sign: | N/A | -| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | voxtral-mini-latest | +| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy. | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `stream` | *Optional[Literal[False]]* | :heavy_minus_sign: | N/A | | +| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | \ No newline at end of file diff --git a/docs/models/batchjobin.md b/docs/models/batchjobin.md index b5b13786..6fd06696 100644 --- a/docs/models/batchjobin.md +++ b/docs/models/batchjobin.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `input_files` | List[*str*] | :heavy_check_mark: | N/A | -| `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | -| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input_files` | List[*str*] | :heavy_check_mark: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | +| `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | mistral-small-latest | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | +| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. 
| | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index a9806a4d..109fa7b1 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -3,23 +3,24 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionrequesttoolchoice.md b/docs/models/chatcompletionrequesttoolchoice.md index 1646528d..dc82a8ef 100644 --- a/docs/models/chatcompletionrequesttoolchoice.md +++ b/docs/models/chatcompletionrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
+ ## Supported Types diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 6faeb411..7d5fb411 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -3,23 +3,24 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequesttoolchoice.md b/docs/models/chatcompletionstreamrequesttoolchoice.md index cce0ca3e..43f3ca38 100644 --- a/docs/models/chatcompletionstreamrequesttoolchoice.md +++ b/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionStreamRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + ## Supported Types diff --git a/docs/models/classificationrequest.md b/docs/models/classificationrequest.md index b9befc89..4b38c68a 100644 --- a/docs/models/classificationrequest.md +++ b/docs/models/classificationrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | -| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. 
| \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | \ No newline at end of file diff --git a/docs/models/completionargs.md b/docs/models/completionargs.md index 0d108225..60d09137 100644 --- a/docs/models/completionargs.md +++ b/docs/models/completionargs.md @@ -5,15 +5,15 @@ White-listed arguments from the completion API ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | -| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `prediction` | 
[OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | -| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | -| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `prediction` | [OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | {
"type": "text"
} | +| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/conversationrequest.md b/docs/models/conversationrequest.md index 141533e7..04378ae3 100644 --- a/docs/models/conversationrequest.md +++ b/docs/models/conversationrequest.md @@ -10,9 +10,11 @@ | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../models/handoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.Tools](../models/tools.md)] | :heavy_minus_sign: | N/A | +| `tools` | List[[models.Tools](../models/tools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md index 61679df6..f389a1e5 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -12,4 +12,6 @@ Request to restart a new conversation from a given entry in the conversation. | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md index 9548b336..d7358dc2 100644 --- a/docs/models/conversationrestartstreamrequest.md +++ b/docs/models/conversationrestartstreamrequest.md @@ -12,4 +12,6 @@ Request to restart a new conversation from a given entry in the conversation. | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. 
| \ No newline at end of file diff --git a/docs/models/conversationstreamrequest.md b/docs/models/conversationstreamrequest.md index a571e2af..e403db68 100644 --- a/docs/models/conversationstreamrequest.md +++ b/docs/models/conversationstreamrequest.md @@ -10,9 +10,11 @@ | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.ConversationStreamRequestTools](../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationStreamRequestTools](../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/documentout.md b/docs/models/documentout.md index b9e7b212..28df11eb 100644 --- a/docs/models/documentout.md +++ b/docs/models/documentout.md @@ -7,18 +7,20 @@ | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | | 
`library_id` | *str* | :heavy_check_mark: | N/A | -| `hash` | *str* | :heavy_check_mark: | N/A | -| `mime_type` | *str* | :heavy_check_mark: | N/A | -| `extension` | *str* | :heavy_check_mark: | N/A | -| `size` | *int* | :heavy_check_mark: | N/A | +| `hash` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `mime_type` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `extension` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `size` | *Nullable[int]* | :heavy_check_mark: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | | `summary` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `processing_status` | *str* | :heavy_check_mark: | N/A | -| `uploaded_by_id` | *str* | :heavy_check_mark: | N/A | +| `uploaded_by_id` | *Nullable[str]* | :heavy_check_mark: | N/A | | `uploaded_by_type` | *str* | :heavy_check_mark: | N/A | | `tokens_processing_main_content` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `tokens_processing_summary` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `tokens_processing_total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/documentupdatein.md b/docs/models/documentupdatein.md index 215ae95f..0993886d 100644 --- a/docs/models/documentupdatein.md +++ b/docs/models/documentupdatein.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| 
`name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, [models.Attributes](../models/attributes.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 2f48099f..0f2fc6a6 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -3,10 +3,10 @@ ## Fields -| Field | Type | Required | Description | Example | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings. | | -| `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | -| `encoding_format` | [Optional[models.EncodingFormat]](../models/encodingformat.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | +| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | +| `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | +| `encoding_format` | [Optional[models.EncodingFormat]](../models/encodingformat.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/embeddingrequestinputs.md b/docs/models/embeddingrequestinputs.md index a3f82c1c..527a089b 100644 --- a/docs/models/embeddingrequestinputs.md +++ b/docs/models/embeddingrequestinputs.md @@ -1,6 +1,6 @@ # EmbeddingRequestInputs -Text to embed. +The text content to be embedded, can be a string or an array of strings for fast processing in bulk. ## Supported Types diff --git a/docs/models/filesapirouteslistfilesrequest.md b/docs/models/filesapirouteslistfilesrequest.md index b28ab3fe..3801a96e 100644 --- a/docs/models/filesapirouteslistfilesrequest.md +++ b/docs/models/filesapirouteslistfilesrequest.md @@ -7,6 +7,7 @@ | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `include_total` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `sample_type` | List[[models.SampleType](../models/sampletype.md)] | :heavy_minus_sign: | N/A | | `source` | List[[models.Source](../models/source.md)] | :heavy_minus_sign: | N/A | | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/fimcompletionrequest.md b/docs/models/fimcompletionrequest.md index 7b785cf0..fde0b625 100644 --- a/docs/models/fimcompletionrequest.md +++ 
b/docs/models/fimcompletionrequest.md @@ -5,13 +5,14 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequest.md b/docs/models/fimcompletionstreamrequest.md index d49a6301..ba62d854 100644 --- a/docs/models/fimcompletionstreamrequest.md +++ b/docs/models/fimcompletionstreamrequest.md @@ -5,13 +5,14 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/format_.md b/docs/models/format_.md new file mode 100644 index 00000000..97d286a4 --- /dev/null +++ b/docs/models/format_.md @@ -0,0 +1,11 @@ +# Format + +Format of the table + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/docs/models/librariesdocumentslistv1request.md b/docs/models/librariesdocumentslistv1request.md index 1b4eb24d..44f63001 100644 --- a/docs/models/librariesdocumentslistv1request.md +++ b/docs/models/librariesdocumentslistv1request.md @@ -9,5 +9,6 @@ | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `filters_attributes` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `sort_by` | *Optional[str]* | :heavy_minus_sign: | N/A | | `sort_order` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/libraryout.md b/docs/models/libraryout.md index cf4de41b..ebf46d57 100644 --- a/docs/models/libraryout.md +++ b/docs/models/libraryout.md @@ -9,15 +9,15 @@ | `name` | *str* | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `owner_id` | *str* | :heavy_check_mark: | N/A | +| `owner_id` | *Nullable[str]* | :heavy_check_mark: | N/A | | `owner_type` | *str* | :heavy_check_mark: | N/A | | 
`total_size` | *int* | :heavy_check_mark: | N/A | | `nb_documents` | *int* | :heavy_check_mark: | N/A | | `chunk_size` | *Nullable[int]* | :heavy_check_mark: | N/A | | `emoji` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `generated_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `generated_description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `explicit_user_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `explicit_workspace_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `org_sharing_role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `org_sharing_role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `generated_name` | *OptionalNullable[str]* | :heavy_minus_sign: | Generated Name | \ No newline at end of file diff --git a/docs/models/listfilesout.md b/docs/models/listfilesout.md index ee544c1b..bcb1f13a 100644 --- a/docs/models/listfilesout.md +++ b/docs/models/listfilesout.md @@ -7,4 +7,4 @@ | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | | `data` | List[[models.FileSchema](../models/fileschema.md)] | :heavy_check_mark: | N/A | | `object` | *str* | :heavy_check_mark: | N/A | -| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| `total` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md index 36b27938..646c8e94 100644 --- a/docs/models/modelcapabilities.md +++ b/docs/models/modelcapabilities.md @@ -6,8 +6,11 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `completion_chat` 
| *Optional[bool]* | :heavy_minus_sign: | N/A | -| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `ocr` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `moderation` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `audio` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversation.md b/docs/models/modelconversation.md index f7f61a79..1a03ef7d 100644 --- a/docs/models/modelconversation.md +++ b/docs/models/modelconversation.md @@ -10,6 +10,7 @@ | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| | `object` | [Optional[models.ModelConversationObject]](../models/modelconversationobject.md) | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | diff --git a/docs/models/name.md b/docs/models/name.md new file mode 100644 index 00000000..18b978a8 --- /dev/null +++ b/docs/models/name.md @@ -0,0 +1,17 @@ +# Name + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/ocrpageobject.md b/docs/models/ocrpageobject.md index 9db3bb77..02473d44 100644 --- a/docs/models/ocrpageobject.md +++ b/docs/models/ocrpageobject.md @@ -8,4 +8,8 @@ | `index` | *int* | :heavy_check_mark: | The page index in a pdf document starting from 0 | | `markdown` | *str* | :heavy_check_mark: | The markdown string response of the page | | `images` | List[[models.OCRImageObject](../models/ocrimageobject.md)] | :heavy_check_mark: | List of all extracted images in the page | +| `tables` | List[[models.OCRTableObject](../models/ocrtableobject.md)] | :heavy_minus_sign: | List of all extracted tables in the page | +| `hyperlinks` | List[*str*] | :heavy_minus_sign: | List of all hyperlinks in the page | +| `header` | *OptionalNullable[str]* | :heavy_minus_sign: | Header of the page | +| `footer` | *OptionalNullable[str]* | :heavy_minus_sign: | Footer of the page | | `dimensions` | [Nullable[models.OCRPageDimensions]](../models/ocrpagedimensions.md) | :heavy_check_mark: | The dimensions of the PDF Page's screenshot image | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md index 6a9c77ab..76e4da92 100644 --- a/docs/models/ocrrequest.md +++ b/docs/models/ocrrequest.md @@ -3,14 +3,17 @@ ## Fields -| Field | Type | Required | 
Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | +| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/ocrtableobject.md b/docs/models/ocrtableobject.md new file mode 100644 index 00000000..4e27697c --- /dev/null +++ b/docs/models/ocrtableobject.md @@ -0,0 +1,10 @@ +# OCRTableObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Table ID for extracted table in a page | +| `content` | *str* | :heavy_check_mark: | Content of the table in the given format | +| `format_` | [models.Format](../models/format_.md) | :heavy_check_mark: | Format of the table | \ No newline at end of file diff --git a/docs/models/prediction.md b/docs/models/prediction.md index 86e9c396..fae3c1ca 100644 --- a/docs/models/prediction.md +++ b/docs/models/prediction.md @@ -1,5 +1,7 @@ # Prediction +Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. 
+ ## Fields diff --git a/docs/models/requestsource.md b/docs/models/requestsource.md new file mode 100644 index 00000000..c81c1159 --- /dev/null +++ b/docs/models/requestsource.md @@ -0,0 +1,10 @@ +# RequestSource + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `API` | api | +| `PLAYGROUND` | playground | +| `AGENT_BUILDER_V1` | agent_builder_v1 | \ No newline at end of file diff --git a/docs/models/responseformat.md b/docs/models/responseformat.md index 23a1641b..5cab22f2 100644 --- a/docs/models/responseformat.md +++ b/docs/models/responseformat.md @@ -1,9 +1,11 @@ # ResponseFormat +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
+ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
| -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseformats.md b/docs/models/responseformats.md index 06886afe..2f5f1e55 100644 --- a/docs/models/responseformats.md +++ b/docs/models/responseformats.md @@ -1,7 +1,5 @@ # ResponseFormats -An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
- ## Values diff --git a/docs/models/sharingdelete.md b/docs/models/sharingdelete.md index 71cacab6..1dcec095 100644 --- a/docs/models/sharingdelete.md +++ b/docs/models/sharingdelete.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `org_id` | *str* | :heavy_check_mark: | N/A | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | \ No newline at end of file diff --git a/docs/models/sharingin.md b/docs/models/sharingin.md index 537ede03..bac18c8d 100644 --- a/docs/models/sharingin.md +++ b/docs/models/sharingin.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `org_id` | *str* | :heavy_check_mark: | N/A | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `level` | [models.ShareEnum](../models/shareenum.md) | :heavy_check_mark: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. 
| \ No newline at end of file diff --git a/docs/models/sharingout.md b/docs/models/sharingout.md index 5844fe64..35aeff43 100644 --- a/docs/models/sharingout.md +++ b/docs/models/sharingout.md @@ -10,4 +10,4 @@ | `org_id` | *str* | :heavy_check_mark: | N/A | | `role` | *str* | :heavy_check_mark: | N/A | | `share_with_type` | *str* | :heavy_check_mark: | N/A | -| `share_with_uuid` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `share_with_uuid` | *Nullable[str]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/tableformat.md b/docs/models/tableformat.md new file mode 100644 index 00000000..54f029b8 --- /dev/null +++ b/docs/models/tableformat.md @@ -0,0 +1,9 @@ +# TableFormat + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaevent.md b/docs/models/toolexecutiondeltaevent.md index bfc9dc0e..7bee6d83 100644 --- a/docs/models/toolexecutiondeltaevent.md +++ b/docs/models/toolexecutiondeltaevent.md @@ -9,5 +9,5 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionDeltaEventName](../models/toolexecutiondeltaeventname.md) | :heavy_check_mark: | N/A | | `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaeventname.md b/docs/models/toolexecutiondeltaeventname.md new file mode 100644 index 00000000..9c3edef8 --- /dev/null +++ b/docs/models/toolexecutiondeltaeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionDeltaEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python 
+value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolexecutiondoneevent.md b/docs/models/toolexecutiondoneevent.md index aa28df59..5898ea5e 100644 --- a/docs/models/toolexecutiondoneevent.md +++ b/docs/models/toolexecutiondoneevent.md @@ -9,5 +9,5 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionDoneEventName](../models/toolexecutiondoneeventname.md) | :heavy_check_mark: | N/A | | `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondoneeventname.md b/docs/models/toolexecutiondoneeventname.md new file mode 100644 index 00000000..6449079d --- /dev/null +++ b/docs/models/toolexecutiondoneeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionDoneEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md index 174abdd1..3678116d 100644 --- a/docs/models/toolexecutionentry.md +++ b/docs/models/toolexecutionentry.md @@ -10,6 +10,6 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `name` | 
[models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `name` | [models.Name](../models/name.md) | :heavy_check_mark: | N/A | | `arguments` | *str* | :heavy_check_mark: | N/A | | `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md index 82ea65e5..de81312b 100644 --- a/docs/models/toolexecutionstartedevent.md +++ b/docs/models/toolexecutionstartedevent.md @@ -9,5 +9,5 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionStartedEventName](../models/toolexecutionstartedeventname.md) | :heavy_check_mark: | N/A | | `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedeventname.md b/docs/models/toolexecutionstartedeventname.md new file mode 100644 index 00000000..3308c483 --- /dev/null +++ b/docs/models/toolexecutionstartedeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionStartedEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index f06cce76..af768506 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -68,7 +68,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", org_id="aadd9ae1-f285-4437-884a-091c77efa6fd", level="Viewer", 
share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") + res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", level="Viewer", share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") # Handle response print(res) @@ -80,10 +80,10 @@ with Mistral( | Parameter | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `library_id` | *str* | :heavy_check_mark: | N/A | -| `org_id` | *str* | :heavy_check_mark: | N/A | | `level` | [models.ShareEnum](../../models/shareenum.md) | :heavy_check_mark: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -113,7 +113,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", org_id="0814a235-c2d0-4814-875a-4b85f93d3dc7", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") + res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") # Handle response print(res) @@ -125,9 +125,9 @@ with Mistral( | Parameter | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `library_id` | *str* | :heavy_check_mark: | N/A | -| `org_id` | *str* | :heavy_check_mark: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index a4e8b22e..87a411cd 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -31,7 +31,9 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -40,24 +42,25 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | 
List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -91,7 +94,9 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=True) + ], agent_id="", stream=True, response_format={ + "type": "text", + }) with res as event_stream: for event in event_stream: @@ -102,24 +107,25 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index c5c45e0f..213ab710 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -26,12 +26,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.complete(model="mistral-small-latest", messages=[ + res = mistral.chat.complete(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -40,27 +42,28 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. 
By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -89,12 +92,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.stream(model="mistral-small-latest", messages=[ + res = mistral.chat.stream(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=True) + ], stream=True, response_format={ + "type": "text", + }) with res as event_stream: for event in event_stream: @@ -105,27 +110,28 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 87eb8d69..75b8c333 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -40,11 +40,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | -| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -120,7 +120,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.classify(model="Silverado", inputs=[ + res = mistral.classifiers.classify(model="mistral-moderation-latest", inputs=[ "", ]) @@ -131,11 +131,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | -| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index d3ce96c2..1e2d560e 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -10,6 +10,7 @@ * [start](#start) - Create a conversation and append entries to it. * [list](#list) - List all created conversations. * [get](#get) - Retrieve a conversation information. +* [delete](#delete) - Delete a conversation. * [append](#append) - Append new entries to an existing conversation. * [get_history](#get_history) - Retrieve all entries in a conversation. * [get_messages](#get_messages) - Retrieve all messages in a conversation. 
@@ -34,7 +35,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start(inputs="", stream=False) + res = mistral.beta.conversations.start(inputs="", stream=False, completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -50,11 +55,13 @@ with Mistral( | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../../models/handoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.Tools](../../models/tools.md)] | :heavy_minus_sign: | N/A | +| `tools` | List[[models.Tools](../../models/tools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| @@ -98,6 +105,7 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -152,6 +160,42 @@ with Mistral( | models.HTTPValidationError | 422 | application/json | | models.SDKError | 4XX, 5XX | \*/\* | +## delete + +Delete a conversation given a conversation_id. + +### Example Usage + + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.conversations.delete(conversation_id="") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + ## append Run completion on the history of the conversation and the user entries. Return the new created entries. @@ -168,7 +212,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append(conversation_id="", inputs=[], stream=False, store=True, handoff_execution="server") + res = mistral.beta.conversations.append(conversation_id="", inputs=[], stream=False, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -296,7 +344,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart(conversation_id="", inputs="", from_entry_id="", stream=False, store=True, handoff_execution="server") + res = mistral.beta.conversations.restart(conversation_id="", inputs="", from_entry_id="", stream=False, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -314,6 +366,8 @@ with Mistral( | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. 
If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -350,7 +404,11 @@ with Mistral( "tool_call_id": "", "result": "", }, - ], stream=True) + ], stream=True, completion_args={ + "response_format": { + "type": "text", + }, + }) with res as event_stream: for event in event_stream: @@ -368,11 +426,13 @@ with Mistral( | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.ConversationStreamRequestTools](../../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationStreamRequestTools](../../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| @@ -403,7 +463,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append_stream(conversation_id="", inputs="", stream=True, store=True, handoff_execution="server") + res = mistral.beta.conversations.append_stream(conversation_id="", inputs="", stream=True, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) with res as event_stream: for event in event_stream: @@ -459,7 +523,11 @@ with Mistral( "content": "", "prefix": False, }, - ], from_entry_id="", stream=True, store=True, handoff_execution="server") + ], from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) with res as event_stream: for event in event_stream: @@ -479,6 +547,8 @@ with Mistral( | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index 71848b07..c1551925 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -7,7 +7,7 @@ ### Available Operations -* [list](#list) - List document in a given library. +* [list](#list) - List documents in a given library. * [upload](#upload) - Upload a new document. * [get](#get) - Retrieve the metadata of a specific document. * [update](#update) - Update the metadata of a specific document. @@ -49,6 +49,7 @@ with Mistral( | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `filters_attributes` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `sort_by` | *Optional[str]* | :heavy_minus_sign: | N/A | | `sort_order` | *Optional[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | @@ -181,6 +182,7 @@ with Mistral( | `library_id` | *str* | :heavy_check_mark: | N/A | | `document_id` | *str* | :heavy_check_mark: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, [models.Attributes](../../models/attributes.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 9554e7b7..b03ea9cd 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -37,14 +37,14 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings. | | -| `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | -| `encoding_format` | [Optional[models.EncodingFormat]](../../models/encodingformat.md) | :heavy_minus_sign: | N/A | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | +| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | +| `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | +| `encoding_format` | [Optional[models.EncodingFormat]](../../models/encodingformat.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index e8d28c86..0a68c1f5 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -78,7 +78,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.list(page=0, page_size=100) + res = mistral.files.list(page=0, page_size=100, include_total=True) # Handle response print(res) @@ -91,6 +91,7 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `include_total` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `sample_type` | List[[models.SampleType](../../models/sampletype.md)] | :heavy_minus_sign: | N/A | | `source` | List[[models.Source](../../models/source.md)] | :heavy_minus_sign: | N/A | | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index cce1c070..d282a810 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -37,7 +37,7 @@ with Mistral( | Parameter | Type | Required | 
Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | 
*str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -45,6 +45,7 @@ with Mistral( | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -76,7 +77,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.stream(model="codestral-2405", prompt="def", top_p=1, stream=True, suffix="return a+b") + res = mistral.fim.stream(model="codestral-latest", prompt="def", top_p=1, stream=True, suffix="return a+b") with res as event_stream: for event in event_stream: @@ -89,7 +90,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -97,6 +98,7 @@ with Mistral( | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index 44b7fcf2..767ba56d 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -11,6 +11,7 @@ * [list](#list) - List agent entities. * [get](#get) - Retrieve an agent entity. * [update](#update) - Update an agent entity. +* [delete](#delete) - Delete an agent entity. * [update_version](#update_version) - Update an agent version. ## create @@ -29,7 +30,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.create(model="LeBaron", name="") + res = mistral.beta.agents.create(model="LeBaron", name="", completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -47,6 +52,7 @@ with Mistral( | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -89,6 +95,11 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `sources` | List[[models.RequestSource](../../models/requestsource.md)] | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -130,6 +141,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -159,7 +171,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.update(agent_id="") + res = mistral.beta.agents.update(agent_id="", completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -178,6 +194,8 @@ with Mistral( | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -191,6 +209,42 @@ with Mistral( | models.HTTPValidationError | 422 | application/json | | models.SDKError | 4XX, 5XX | \*/\* | +## delete + +Delete an agent entity. + +### Example Usage + + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.agents.delete(agent_id="") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + ## update_version Switch the version of an agent. diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 0ef3f138..469a2029 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -75,7 +75,7 @@ with Mistral( res = mistral.batch.jobs.create(input_files=[ "fe3343a2-3b8d-404b-ba32-a78dede2614a", - ], endpoint="/v1/moderations", timeout_hours=24) + ], endpoint="/v1/moderations", model="mistral-small-latest", timeout_hours=24) # Handle response print(res) @@ -84,15 +84,15 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `input_files` | List[*str*] | :heavy_check_mark: | N/A | -| `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | -| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input_files` | List[*str*] | :heavy_check_mark: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body of the request for the batch inference in a "body" field. An example of such a file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | +| `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | mistral-small-latest | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.
| | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | +| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 3877c545..94491520 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -49,10 +49,9 @@ with Mistral( ### Errors -| Error Type | Status Code | Content Type | -| -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | ## retrieve diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index c0c1293e..9264d104 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -30,6 +30,10 @@ with Mistral( "url": "https://measly-scrap.com", }, "type": "image_url", + }, bbox_annotation_format={ + "type": "text", + }, document_annotation_format={ + "type": "text", }) # Handle response @@ -39,18 +43,21 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | +| `table_format` | [OptionalNullable[models.TableFormat]](../../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index 022066ac..52b7884e 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -8,7 +8,7 @@ API for audio transcription. ### Available Operations * [complete](#complete) - Create Transcription -* [stream](#stream) - Create streaming transcription (SSE) +* [stream](#stream) - Create Streaming Transcription (SSE) ## complete @@ -35,16 +35,16 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | -| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | -| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | -| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. 
| -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | voxtral-mini-latest | +| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response @@ -58,7 +58,7 @@ with Mistral( ## stream -Create streaming transcription (SSE) +Create Streaming Transcription (SSE) ### Example Usage diff --git a/pyproject.toml b/pyproject.toml index 4bea6627..58efd52d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.12" +version = "1.10.0" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index fa0b5e7d..851d6fbe 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.11" +__version__: str = "1.10.0" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.687.13" -__user_agent__: str = "speakeasy-sdk/python 1.9.11 2.687.13 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.10.0 2.687.13 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/accesses.py b/src/mistralai/accesses.py index ea33517b..dd8ffade 100644 --- a/src/mistralai/accesses.py +++ b/src/mistralai/accesses.py @@ -194,10 +194,10 @@ def update_or_create( self, *, library_id: str, - org_id: str, level: models.ShareEnum, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -208,10 +208,10 @@ def update_or_create( Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. 
:param library_id: - :param org_id: :param level: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. + :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -300,10 +300,10 @@ async def update_or_create_async( self, *, library_id: str, - org_id: str, level: models.ShareEnum, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -314,10 +314,10 @@ async def update_or_create_async( Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. :param library_id: - :param org_id: :param level: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. + :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -406,9 +406,9 @@ def delete( self, *, library_id: str, - org_id: str, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -419,9 +419,9 @@ def delete( Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. 
You have to be the owner of the library to delete an acces other than yours. :param library_id: - :param org_id: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. + :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -509,9 +509,9 @@ async def delete_async( self, *, library_id: str, - org_id: str, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -522,9 +522,9 @@ async def delete_async( Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. :param library_id: - :param org_id: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. 
+ :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index b220ca5b..173921fa 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Union class Agents(BaseSDK): @@ -29,6 +29,7 @@ def complete( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -62,13 +63,14 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. :param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param retries: Override the default retry configuration for this method @@ -91,6 +93,7 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.AgentsCompletionRequestMessages] ), @@ -188,6 +191,7 @@ async def complete_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -221,13 +225,14 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. :param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param retries: Override the default retry configuration for this method @@ -250,6 +255,7 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.AgentsCompletionRequestMessages] ), @@ -347,6 +353,7 @@ def stream( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -382,13 +389,14 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. :param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method @@ -411,6 +419,7 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.AgentsCompletionStreamRequestMessages] ), @@ -516,6 +525,7 @@ async def stream_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -551,13 +561,14 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. 
By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. :param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method @@ -580,6 +591,7 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.AgentsCompletionStreamRequestMessages] ), diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 67777a1a..6a8058f7 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Union # region imports from typing import Type @@ -105,6 +105,7 @@ def complete( stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -141,14 +142,15 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method @@ -174,6 +176,7 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model(messages, List[models.Messages]), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -263,6 +266,7 @@ async def complete_async( stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -299,14 +303,15 @@ async def complete_async( :param stream: Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method @@ -332,6 +337,7 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model(messages, List[models.Messages]), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -429,6 +435,7 @@ def stream( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -467,14 +474,15 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method @@ -500,6 +508,7 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.ChatCompletionStreamRequestMessages] ), @@ -607,6 +616,7 @@ async def stream_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -645,14 +655,15 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
+ :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method @@ -678,6 +689,7 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.ChatCompletionStreamRequestMessages] ), diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 64551a96..a7d58abd 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Union # region imports import typing @@ -228,15 +228,15 @@ def start( store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, + tools: Optional[Union[List[models.Tools], List[models.ToolsTypedDict]]] = None, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[int] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -252,11 +252,13 @@ def start( :param store: :param handoff_execution: :param instructions: - :param tools: + :param tools: List of tools which are available to the model during the conversation. 
:param completion_args: :param name: :param description: + :param metadata: :param agent_id: + :param agent_version: :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -279,13 +281,15 @@ def start( store=store, handoff_execution=handoff_execution, instructions=instructions, - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]), + tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), name=name, description=description, + metadata=metadata, agent_id=agent_id, + agent_version=agent_version, model=model, ) @@ -356,15 +360,15 @@ async def start_async( store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, + tools: Optional[Union[List[models.Tools], List[models.ToolsTypedDict]]] = None, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[int] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -380,11 +384,13 @@ async def start_async( :param store: :param handoff_execution: :param instructions: - :param tools: + :param tools: List of tools which are available to the model during the conversation. 
:param completion_args: :param name: :param description: + :param metadata: :param agent_id: + :param agent_version: :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -407,13 +413,15 @@ async def start_async( store=store, handoff_execution=handoff_execution, instructions=instructions, - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]), + tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), name=name, description=description, + metadata=metadata, agent_id=agent_id, + agent_version=agent_version, model=model, ) @@ -481,6 +489,7 @@ def list( *, page: Optional[int] = 0, page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -492,6 +501,7 @@ def list( :param page: :param page_size: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -510,6 +520,7 @@ def list( request = models.AgentsAPIV1ConversationsListRequest( page=page, page_size=page_size, + metadata=metadata, ) req = self._build_request( @@ -573,6 +584,7 @@ async def list_async( *, page: Optional[int] = 0, page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -584,6 +596,7 @@ async def list_async( :param page: :param page_size: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the 
default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -602,6 +615,7 @@ async def list_async( request = models.AgentsAPIV1ConversationsListRequest( page=page, page_size=page_size, + metadata=metadata, ) req = self._build_request_async( @@ -842,6 +856,184 @@ async def get_async( raise models.SDKError("Unexpected response received", http_res) + def delete( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a conversation. + + Delete a conversation given a conversation_id. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a conversation. + + Delete a conversation given a conversation_id. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + def append( self, *, @@ -1450,6 +1642,8 @@ def restart( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1466,6 +1660,8 @@ def restart( :param store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1491,7 +1687,9 @@ def restart( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + metadata=metadata, from_entry_id=from_entry_id, + agent_version=agent_version, ), ) @@ -1572,6 +1770,8 @@ async def restart_async( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1588,6 +1788,8 @@ async def restart_async( :param store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1613,7 +1815,9 @@ async def restart_async( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + metadata=metadata, from_entry_id=from_entry_id, + agent_version=agent_version, ), ) @@ -1690,18 +1894,20 @@ def start_stream( models.ConversationStreamRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ + tools: Optional[ Union[ List[models.ConversationStreamRequestTools], List[models.ConversationStreamRequestToolsTypedDict], ] - ] = UNSET, + ] = None, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[int] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -1717,11 +1923,13 @@ def start_stream( :param store: :param handoff_execution: :param instructions: - :param tools: + :param tools: List of tools which are available to the model during the conversation. 
:param completion_args: :param name: :param description: + :param metadata: :param agent_id: + :param agent_version: :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -1745,14 +1953,16 @@ def start_stream( handoff_execution=handoff_execution, instructions=instructions, tools=utils.get_pydantic_model( - tools, OptionalNullable[List[models.ConversationStreamRequestTools]] + tools, Optional[List[models.ConversationStreamRequestTools]] ), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), name=name, description=description, + metadata=metadata, agent_id=agent_id, + agent_version=agent_version, model=model, ) @@ -1832,18 +2042,20 @@ async def start_stream_async( models.ConversationStreamRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ + tools: Optional[ Union[ List[models.ConversationStreamRequestTools], List[models.ConversationStreamRequestToolsTypedDict], ] - ] = UNSET, + ] = None, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[int] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -1859,11 +2071,13 @@ async def start_stream_async( :param store: :param handoff_execution: :param instructions: - :param tools: + :param tools: List of tools which are available to the model during the conversation. 
:param completion_args: :param name: :param description: + :param metadata: :param agent_id: + :param agent_version: :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -1887,14 +2101,16 @@ async def start_stream_async( handoff_execution=handoff_execution, instructions=instructions, tools=utils.get_pydantic_model( - tools, OptionalNullable[List[models.ConversationStreamRequestTools]] + tools, Optional[List[models.ConversationStreamRequestTools]] ), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), name=name, description=description, + metadata=metadata, agent_id=agent_id, + agent_version=agent_version, model=model, ) @@ -2230,6 +2446,8 @@ def restart_stream( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -2246,6 +2464,8 @@ def restart_stream( :param store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -2271,7 +2491,9 @@ def restart_stream( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + metadata=metadata, from_entry_id=from_entry_id, + agent_version=agent_version, ), ) @@ -2359,6 +2581,8 @@ async def restart_stream_async( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -2375,6 +2599,8 @@ async def restart_stream_async( :param store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -2400,7 +2626,9 @@ async def restart_stream_async( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + metadata=metadata, from_entry_id=from_entry_id, + agent_version=agent_version, ), ) diff --git a/src/mistralai/documents.py b/src/mistralai/documents.py index 5f8c6b9f..c1497bff 100644 --- a/src/mistralai/documents.py +++ b/src/mistralai/documents.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional, Union +from typing import Any, Dict, Mapping, Optional, Union class Documents(BaseSDK): @@ -19,6 +19,7 @@ def list( search: OptionalNullable[str] = UNSET, page_size: Optional[int] = 100, page: Optional[int] = 0, + filters_attributes: OptionalNullable[str] = UNSET, sort_by: Optional[str] = "created_at", sort_order: Optional[str] = "desc", retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -26,7 +27,7 @@ def list( timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> models.ListDocumentOut: - r"""List document in a given library. + r"""List documents in a given library. Given a library, lists the document that have been uploaded to that library. 
@@ -34,6 +35,7 @@ def list( :param search: :param page_size: :param page: + :param filters_attributes: :param sort_by: :param sort_order: :param retries: Override the default retry configuration for this method @@ -56,6 +58,7 @@ def list( search=search, page_size=page_size, page=page, + filters_attributes=filters_attributes, sort_by=sort_by, sort_order=sort_order, ) @@ -123,6 +126,7 @@ async def list_async( search: OptionalNullable[str] = UNSET, page_size: Optional[int] = 100, page: Optional[int] = 0, + filters_attributes: OptionalNullable[str] = UNSET, sort_by: Optional[str] = "created_at", sort_order: Optional[str] = "desc", retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -130,7 +134,7 @@ async def list_async( timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> models.ListDocumentOut: - r"""List document in a given library. + r"""List documents in a given library. Given a library, lists the document that have been uploaded to that library. 
@@ -138,6 +142,7 @@ async def list_async( :param search: :param page_size: :param page: + :param filters_attributes: :param sort_by: :param sort_order: :param retries: Override the default retry configuration for this method @@ -160,6 +165,7 @@ async def list_async( search=search, page_size=page_size, page=page, + filters_attributes=filters_attributes, sort_by=sort_by, sort_order=sort_order, ) @@ -612,6 +618,9 @@ def update( library_id: str, document_id: str, name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -624,6 +633,7 @@ def update( :param library_id: :param document_id: :param name: + :param attributes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -644,6 +654,7 @@ def update( document_id=document_id, document_update_in=models.DocumentUpdateIn( name=name, + attributes=attributes, ), ) @@ -716,6 +727,9 @@ async def update_async( library_id: str, document_id: str, name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -728,6 +742,7 @@ async def update_async( :param library_id: :param document_id: :param name: + :param attributes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -748,6 +763,7 @@ async def update_async( 
document_id=document_id, document_update_in=models.DocumentUpdateIn( name=name, + attributes=attributes, ), ) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 1822a1ec..76e8e719 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -31,9 +31,9 @@ def create( Embeddings - :param model: ID of the model to use. - :param inputs: Text to embed. - :param output_dimension: The dimension of the output embeddings. + :param model: The ID of the model to be used for embedding. + :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. :param output_dtype: :param encoding_format: :param retries: Override the default retry configuration for this method @@ -137,9 +137,9 @@ async def create_async( Embeddings - :param model: ID of the model to use. - :param inputs: Text to embed. - :param output_dimension: The dimension of the output embeddings. + :param model: The ID of the model to be used for embedding. + :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. 
:param output_dtype: :param encoding_format: :param retries: Override the default retry configuration for this method diff --git a/src/mistralai/files.py b/src/mistralai/files.py index c6e438af..ae4eb779 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -212,6 +212,7 @@ def list( *, page: Optional[int] = 0, page_size: Optional[int] = 100, + include_total: Optional[bool] = True, sample_type: OptionalNullable[List[models.SampleType]] = UNSET, source: OptionalNullable[List[models.Source]] = UNSET, search: OptionalNullable[str] = UNSET, @@ -227,6 +228,7 @@ def list( :param page: :param page_size: + :param include_total: :param sample_type: :param source: :param search: @@ -249,6 +251,7 @@ def list( request = models.FilesAPIRoutesListFilesRequest( page=page, page_size=page_size, + include_total=include_total, sample_type=sample_type, source=source, search=search, @@ -310,6 +313,7 @@ async def list_async( *, page: Optional[int] = 0, page_size: Optional[int] = 100, + include_total: Optional[bool] = True, sample_type: OptionalNullable[List[models.SampleType]] = UNSET, source: OptionalNullable[List[models.Source]] = UNSET, search: OptionalNullable[str] = UNSET, @@ -325,6 +329,7 @@ async def list_async( :param page: :param page_size: + :param include_total: :param sample_type: :param source: :param search: @@ -347,6 +352,7 @@ async def list_async( request = models.FilesAPIRoutesListFilesRequest( page=page, page_size=page_size, + include_total=include_total, sample_type=sample_type, source=source, search=search, diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index fa7b15c2..49bdb32e 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional, Union +from typing import Any, Dict, Mapping, Optional, 
Union class Fim(BaseSDK): @@ -28,6 +28,7 @@ def complete( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -39,7 +40,7 @@ def complete( FIM completion. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -47,6 +48,7 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param suffix: Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method @@ -72,6 +74,7 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, @@ -152,6 +155,7 @@ async def complete_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -163,7 +167,7 @@ async def complete_async( FIM completion. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -171,6 +175,7 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method @@ -196,6 +201,7 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, @@ -276,6 +282,7 @@ def stream( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -287,7 +294,7 @@ def stream( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -295,6 +302,7 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method @@ -320,6 +328,7 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, @@ -408,6 +417,7 @@ async def stream_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -419,7 +429,7 @@ async def stream_async( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. 
Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -427,6 +437,7 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. 
:param retries: Override the default retry configuration for this method @@ -452,6 +463,7 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py index 65f256d6..0d9ad0b7 100644 --- a/src/mistralai/mistral_agents.py +++ b/src/mistralai/mistral_agents.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Union class MistralAgents(BaseSDK): @@ -29,6 +29,7 @@ def create( ] = None, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -45,6 +46,7 @@ def create( :param completion_args: White-listed arguments from the completion API :param description: :param handoffs: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -72,6 +74,7 @@ def create( name=name, description=description, handoffs=handoffs, + metadata=metadata, ) req = self._build_request( @@ -150,6 +153,7 @@ async def create_async( ] = None, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -166,6 +170,7 @@ async def create_async( :param 
completion_args: White-listed arguments from the completion API :param description: :param handoffs: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -193,6 +198,7 @@ async def create_async( name=name, description=description, handoffs=handoffs, + metadata=metadata, ) req = self._build_request_async( @@ -259,6 +265,11 @@ def list( *, page: Optional[int] = 0, page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, + name: OptionalNullable[str] = UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -270,6 +281,11 @@ def list( :param page: :param page_size: + :param deployment_chat: + :param sources: + :param name: + :param id: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -288,6 +304,11 @@ def list( request = models.AgentsAPIV1AgentsListRequest( page=page, page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + id=id, + metadata=metadata, ) req = self._build_request( @@ -351,6 +372,11 @@ async def list_async( *, page: Optional[int] = 0, page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, + name: OptionalNullable[str] = UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] 
= UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -362,6 +388,11 @@ async def list_async( :param page: :param page_size: + :param deployment_chat: + :param sources: + :param name: + :param id: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -380,6 +411,11 @@ async def list_async( request = models.AgentsAPIV1AgentsListRequest( page=page, page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + id=id, + metadata=metadata, ) req = self._build_request_async( @@ -442,6 +478,7 @@ def get( self, *, agent_id: str, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -452,6 +489,7 @@ def get( Given an agent retrieve an agent entity with its attributes. :param agent_id: + :param agent_version: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -469,6 +507,7 @@ def get( request = models.AgentsAPIV1AgentsGetRequest( agent_id=agent_id, + agent_version=agent_version, ) req = self._build_request( @@ -531,6 +570,7 @@ async def get_async( self, *, agent_id: str, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -541,6 +581,7 @@ async def get_async( Given an agent retrieve an agent entity with its attributes. 
:param agent_id: + :param agent_version: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -558,6 +599,7 @@ async def get_async( request = models.AgentsAPIV1AgentsGetRequest( agent_id=agent_id, + agent_version=agent_version, ) req = self._build_request_async( @@ -634,6 +676,8 @@ def update( name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, + deployment_chat: OptionalNullable[bool] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -651,6 +695,8 @@ def update( :param name: :param description: :param handoffs: + :param deployment_chat: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -680,6 +726,8 @@ def update( name=name, description=description, handoffs=handoffs, + deployment_chat=deployment_chat, + metadata=metadata, ), ) @@ -764,6 +812,8 @@ async def update_async( name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, + deployment_chat: OptionalNullable[bool] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -781,6 +831,8 @@ async def update_async( :param name: :param description: :param handoffs: + :param deployment_chat: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override 
the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -810,6 +862,8 @@ async def update_async( name=name, description=description, handoffs=handoffs, + deployment_chat=deployment_chat, + metadata=metadata, ), ) @@ -876,6 +930,180 @@ async def update_async( raise models.SDKError("Unexpected response received", http_res) + def delete( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", 
http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, 
models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + def update_version( self, *, diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index fb0a0de7..6c213756 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -237,12 +237,12 @@ def create( Create a new batch job, it will be queued for processing. - :param input_files: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` :param endpoint: - :param model: - :param agent_id: - :param metadata: - :param timeout_hours: + :param model: The model to be used for batch inference. 
+ :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. + :param metadata: The metadata of your choice to be associated with the batch inference job. + :param timeout_hours: The timeout in hours for the batch inference job. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -338,12 +338,12 @@ async def create_async( Create a new batch job, it will be queued for processing. - :param input_files: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` :param endpoint: - :param model: - :param agent_id: - :param metadata: - :param timeout_hours: + :param model: The model to be used for batch inference. + :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. + :param metadata: The metadata of your choice to be associated with the batch inference job. + :param timeout_hours: The timeout in hours for the batch inference job. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 0298e73b..7895aeaa 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -41,6 +41,10 @@ AgentHandoffStartedEventType, AgentHandoffStartedEventTypedDict, ) + from .agents_api_v1_agents_deleteop import ( + AgentsAPIV1AgentsDeleteRequest, + AgentsAPIV1AgentsDeleteRequestTypedDict, + ) from .agents_api_v1_agents_getop import ( AgentsAPIV1AgentsGetRequest, AgentsAPIV1AgentsGetRequestTypedDict, @@ -65,6 +69,10 @@ AgentsAPIV1ConversationsAppendRequest, AgentsAPIV1ConversationsAppendRequestTypedDict, ) + from .agents_api_v1_conversations_deleteop import ( + AgentsAPIV1ConversationsDeleteRequest, + AgentsAPIV1ConversationsDeleteRequestTypedDict, + ) from .agents_api_v1_conversations_getop import ( AgentsAPIV1ConversationsGetRequest, AgentsAPIV1ConversationsGetRequestTypedDict, @@ -374,7 +382,12 @@ ) from .documentout import DocumentOut, DocumentOutTypedDict from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict - from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict + from .documentupdatein import ( + Attributes, + AttributesTypedDict, + DocumentUpdateIn, + DocumentUpdateInTypedDict, + ) from .documenturlchunk import ( DocumentURLChunk, DocumentURLChunkType, @@ -720,8 +733,15 @@ from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict - from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict + from .ocrrequest import ( + Document, + DocumentTypedDict, + OCRRequest, + OCRRequestTypedDict, + 
TableFormat, + ) from .ocrresponse import OCRResponse, OCRResponseTypedDict + from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict from .paginationinfo import PaginationInfo, PaginationInfoTypedDict @@ -732,6 +752,7 @@ ReferenceChunkType, ReferenceChunkTypedDict, ) + from .requestsource import RequestSource from .responsedoneevent import ( ResponseDoneEvent, ResponseDoneEventType, @@ -792,15 +813,21 @@ from .toolchoiceenum import ToolChoiceEnum from .toolexecutiondeltaevent import ( ToolExecutionDeltaEvent, + ToolExecutionDeltaEventName, + ToolExecutionDeltaEventNameTypedDict, ToolExecutionDeltaEventType, ToolExecutionDeltaEventTypedDict, ) from .toolexecutiondoneevent import ( ToolExecutionDoneEvent, + ToolExecutionDoneEventName, + ToolExecutionDoneEventNameTypedDict, ToolExecutionDoneEventType, ToolExecutionDoneEventTypedDict, ) from .toolexecutionentry import ( + Name, + NameTypedDict, ToolExecutionEntry, ToolExecutionEntryObject, ToolExecutionEntryType, @@ -808,6 +835,8 @@ ) from .toolexecutionstartedevent import ( ToolExecutionStartedEvent, + ToolExecutionStartedEventName, + ToolExecutionStartedEventNameTypedDict, ToolExecutionStartedEventType, ToolExecutionStartedEventTypedDict, ) @@ -928,6 +957,8 @@ "AgentUpdateRequestTools", "AgentUpdateRequestToolsTypedDict", "AgentUpdateRequestTypedDict", + "AgentsAPIV1AgentsDeleteRequest", + "AgentsAPIV1AgentsDeleteRequestTypedDict", "AgentsAPIV1AgentsGetRequest", "AgentsAPIV1AgentsGetRequestTypedDict", "AgentsAPIV1AgentsListRequest", @@ -940,6 +971,8 @@ "AgentsAPIV1ConversationsAppendRequestTypedDict", "AgentsAPIV1ConversationsAppendStreamRequest", "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", + "AgentsAPIV1ConversationsDeleteRequest", + "AgentsAPIV1ConversationsDeleteRequestTypedDict", "AgentsAPIV1ConversationsGetRequest", 
"AgentsAPIV1ConversationsGetRequestTypedDict", "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", @@ -980,6 +1013,8 @@ "AssistantMessageContentTypedDict", "AssistantMessageRole", "AssistantMessageTypedDict", + "Attributes", + "AttributesTypedDict", "AudioChunk", "AudioChunkType", "AudioChunkTypedDict", @@ -1211,6 +1246,7 @@ "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModelType", "FinishReason", + "Format", "Function", "FunctionCall", "FunctionCallEntry", @@ -1412,6 +1448,8 @@ "ModerationObjectTypedDict", "ModerationResponse", "ModerationResponseTypedDict", + "Name", + "NameTypedDict", "NoResponseError", "OCRImageObject", "OCRImageObjectTypedDict", @@ -1423,6 +1461,8 @@ "OCRRequestTypedDict", "OCRResponse", "OCRResponseTypedDict", + "OCRTableObject", + "OCRTableObjectTypedDict", "OCRUsageInfo", "OCRUsageInfoTypedDict", "Object", @@ -1444,6 +1484,7 @@ "ReferenceChunkTypedDict", "Repositories", "RepositoriesTypedDict", + "RequestSource", "Response1", "Response1TypedDict", "ResponseBody", @@ -1490,6 +1531,7 @@ "SystemMessageContentChunksTypedDict", "SystemMessageContentTypedDict", "SystemMessageTypedDict", + "TableFormat", "TextChunk", "TextChunkType", "TextChunkTypedDict", @@ -1506,9 +1548,13 @@ "ToolChoiceEnum", "ToolChoiceTypedDict", "ToolExecutionDeltaEvent", + "ToolExecutionDeltaEventName", + "ToolExecutionDeltaEventNameTypedDict", "ToolExecutionDeltaEventType", "ToolExecutionDeltaEventTypedDict", "ToolExecutionDoneEvent", + "ToolExecutionDoneEventName", + "ToolExecutionDoneEventNameTypedDict", "ToolExecutionDoneEventType", "ToolExecutionDoneEventTypedDict", "ToolExecutionEntry", @@ -1516,6 +1562,8 @@ "ToolExecutionEntryType", "ToolExecutionEntryTypedDict", "ToolExecutionStartedEvent", + "ToolExecutionStartedEventName", + "ToolExecutionStartedEventNameTypedDict", "ToolExecutionStartedEventType", "ToolExecutionStartedEventTypedDict", "ToolFileChunk", @@ -1612,6 +1660,8 @@ "AgentHandoffStartedEvent": ".agenthandoffstartedevent", 
"AgentHandoffStartedEventType": ".agenthandoffstartedevent", "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", + "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", @@ -1624,6 +1674,8 @@ "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", "AgentsAPIV1ConversationsGetResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", @@ -1860,6 +1912,8 @@ "DocumentOutTypedDict": ".documentout", "DocumentTextContent": ".documenttextcontent", "DocumentTextContentTypedDict": ".documenttextcontent", + "Attributes": ".documentupdatein", + "AttributesTypedDict": ".documentupdatein", "DocumentUpdateIn": ".documentupdatein", "DocumentUpdateInTypedDict": ".documentupdatein", "DocumentURLChunk": ".documenturlchunk", @@ -2126,8 +2180,12 @@ "DocumentTypedDict": ".ocrrequest", "OCRRequest": ".ocrrequest", "OCRRequestTypedDict": ".ocrrequest", + "TableFormat": ".ocrrequest", "OCRResponse": ".ocrresponse", "OCRResponseTypedDict": ".ocrresponse", + "Format": ".ocrtableobject", + "OCRTableObject": ".ocrtableobject", + "OCRTableObjectTypedDict": ".ocrtableobject", "OCRUsageInfo": ".ocrusageinfo", "OCRUsageInfoTypedDict": 
".ocrusageinfo", "OutputContentChunks": ".outputcontentchunks", @@ -2141,6 +2199,7 @@ "ReferenceChunk": ".referencechunk", "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", + "RequestSource": ".requestsource", "ResponseDoneEvent": ".responsedoneevent", "ResponseDoneEventType": ".responsedoneevent", "ResponseDoneEventTypedDict": ".responsedoneevent", @@ -2197,16 +2256,24 @@ "ToolChoiceTypedDict": ".toolchoice", "ToolChoiceEnum": ".toolchoiceenum", "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventType": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", "ToolExecutionDoneEvent": ".toolexecutiondoneevent", + "ToolExecutionDoneEventName": ".toolexecutiondoneevent", + "ToolExecutionDoneEventNameTypedDict": ".toolexecutiondoneevent", "ToolExecutionDoneEventType": ".toolexecutiondoneevent", "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", + "Name": ".toolexecutionentry", + "NameTypedDict": ".toolexecutionentry", "ToolExecutionEntry": ".toolexecutionentry", "ToolExecutionEntryObject": ".toolexecutionentry", "ToolExecutionEntryType": ".toolexecutionentry", "ToolExecutionEntryTypedDict": ".toolexecutionentry", "ToolExecutionStartedEvent": ".toolexecutionstartedevent", + "ToolExecutionStartedEventName": ".toolexecutionstartedevent", + "ToolExecutionStartedEventNameTypedDict": ".toolexecutionstartedevent", "ToolExecutionStartedEventType": ".toolexecutionstartedevent", "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", "ToolFileChunk": ".toolfilechunk", diff --git a/src/mistralai/models/agent.py b/src/mistralai/models/agent.py index b6bf17ab..5d0b39fa 100644 --- a/src/mistralai/models/agent.py +++ b/src/mistralai/models/agent.py @@ -12,7 +12,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, 
UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -50,8 +50,11 @@ class AgentTypedDict(TypedDict): name: str id: str version: int + versions: List[int] created_at: datetime updated_at: datetime + deployment_chat: bool + source: str instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" tools: NotRequired[List[AgentToolsTypedDict]] @@ -60,6 +63,7 @@ class AgentTypedDict(TypedDict): r"""White-listed arguments from the completion API""" description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] + metadata: NotRequired[Nullable[Dict[str, Any]]] object: NotRequired[AgentObject] @@ -72,10 +76,16 @@ class Agent(BaseModel): version: int + versions: List[int] + created_at: datetime updated_at: datetime + deployment_chat: bool + + source: str + instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" @@ -89,6 +99,8 @@ class Agent(BaseModel): handoffs: OptionalNullable[List[str]] = UNSET + metadata: OptionalNullable[Dict[str, Any]] = UNSET + object: Optional[AgentObject] = "agent" @model_serializer(mode="wrap") @@ -99,9 +111,10 @@ def serialize_model(self, handler): "completion_args", "description", "handoffs", + "metadata", "object", ] - nullable_fields = ["instructions", "description", "handoffs"] + nullable_fields = ["instructions", "description", "handoffs", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py index 42ab84f5..7fa3dfe9 100644 --- a/src/mistralai/models/agentconversation.py +++ b/src/mistralai/models/agentconversation.py @@ -4,7 +4,7 @@ from 
datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Literal, Optional +from typing import Any, Dict, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -20,7 +20,10 @@ class AgentConversationTypedDict(TypedDict): r"""Name given to the conversation.""" description: NotRequired[Nullable[str]] r"""Description of the what the conversation is about.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" object: NotRequired[AgentConversationObject] + agent_version: NotRequired[Nullable[int]] class AgentConversation(BaseModel): @@ -38,12 +41,17 @@ class AgentConversation(BaseModel): description: OptionalNullable[str] = UNSET r"""Description of the what the conversation is about.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + object: Optional[AgentConversationObject] = "conversation" + agent_version: OptionalNullable[int] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name", "description", "object"] - nullable_fields = ["name", "description"] + optional_fields = ["name", "description", "metadata", "object", "agent_version"] + nullable_fields = ["name", "description", "metadata", "agent_version"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentcreationrequest.py b/src/mistralai/models/agentcreationrequest.py index 83a27028..6a14201e 100644 --- a/src/mistralai/models/agentcreationrequest.py +++ b/src/mistralai/models/agentcreationrequest.py @@ -11,7 +11,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union 
from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -52,6 +52,7 @@ class AgentCreationRequestTypedDict(TypedDict): r"""White-listed arguments from the completion API""" description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] + metadata: NotRequired[Nullable[Dict[str, Any]]] class AgentCreationRequest(BaseModel): @@ -72,6 +73,8 @@ class AgentCreationRequest(BaseModel): handoffs: OptionalNullable[List[str]] = UNSET + metadata: OptionalNullable[Dict[str, Any]] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -80,8 +83,9 @@ def serialize_model(self, handler): "completion_args", "description", "handoffs", + "metadata", ] - nullable_fields = ["instructions", "description", "handoffs"] + nullable_fields = ["instructions", "description", "handoffs", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agents_api_v1_agents_deleteop.py b/src/mistralai/models/agents_api_v1_agents_deleteop.py new file mode 100644 index 00000000..38e04953 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_deleteop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsDeleteRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/agents_api_v1_agents_getop.py b/src/mistralai/models/agents_api_v1_agents_getop.py index 5dbcecc1..dced6dbb 100644 --- a/src/mistralai/models/agents_api_v1_agents_getop.py +++ b/src/mistralai/models/agents_api_v1_agents_getop.py @@ -1,16 +1,53 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): agent_id: str + agent_version: NotRequired[Nullable[int]] class AgentsAPIV1AgentsGetRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + + agent_version: Annotated[ + OptionalNullable[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["agent_version"] + nullable_fields = ["agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = 
serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agents_api_v1_agents_listop.py b/src/mistralai/models/agents_api_v1_agents_listop.py index 25f48a62..69a157a6 100644 --- a/src/mistralai/models/agents_api_v1_agents_listop.py +++ b/src/mistralai/models/agents_api_v1_agents_listop.py @@ -1,15 +1,22 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel +from .requestsource import RequestSource +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import FieldMetadata, QueryParamMetadata -from typing import Optional +from pydantic import model_serializer +from typing import Any, Dict, List, Optional from typing_extensions import Annotated, NotRequired, TypedDict class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] + deployment_chat: NotRequired[Nullable[bool]] + sources: NotRequired[Nullable[List[RequestSource]]] + name: NotRequired[Nullable[str]] + id: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] class AgentsAPIV1AgentsListRequest(BaseModel): @@ -22,3 +29,66 @@ class AgentsAPIV1AgentsListRequest(BaseModel): Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 20 + + deployment_chat: Annotated[ + OptionalNullable[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + sources: Annotated[ + 
OptionalNullable[List[RequestSource]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "deployment_chat", + "sources", + "name", + "id", + "metadata", + ] + nullable_fields = ["deployment_chat", "sources", "name", "id", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agents_api_v1_conversations_deleteop.py b/src/mistralai/models/agents_api_v1_conversations_deleteop.py new file mode 100644 index 00000000..94126cae --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_deleteop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" + + +class AgentsAPIV1ConversationsDeleteRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching metadata.""" diff --git a/src/mistralai/models/agents_api_v1_conversations_listop.py b/src/mistralai/models/agents_api_v1_conversations_listop.py index f1d3d579..e1c8489b 100644 --- a/src/mistralai/models/agents_api_v1_conversations_listop.py +++ b/src/mistralai/models/agents_api_v1_conversations_listop.py @@ -3,15 +3,17 @@ from __future__ import annotations from .agentconversation import AgentConversation, AgentConversationTypedDict from .modelconversation import ModelConversation, ModelConversationTypedDict -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import FieldMetadata, QueryParamMetadata -from typing import Optional, Union +from pydantic import model_serializer +from typing import Any, Dict, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] + metadata: NotRequired[Nullable[Dict[str, Any]]] class AgentsAPIV1ConversationsListRequest(BaseModel): @@ -25,6 +27,41 @@ class AgentsAPIV1ConversationsListRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 100 + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(style="form", 
explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["page", "page_size", "metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + ResponseBodyTypedDict = TypeAliasType( "ResponseBodyTypedDict", diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index c832edfd..cff4df64 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -15,7 +15,7 @@ from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -77,16 +77,19 @@ class AgentsCompletionRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @@ -111,22 +114,26 @@ class AgentsCompletionRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET tool_choice: Optional[AgentsCompletionRequestToolChoice] = None presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None @@ -142,6 +149,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -152,7 +160,14 @@ def serialize_model(self, handler): "parallel_tool_calls", "prompt_mode", ] - nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"] + nullable_fields = [ + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index 6e619b77..69edc23c 100644 --- 
a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -15,7 +15,7 @@ from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -76,16 +76,19 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict] presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @@ -109,22 +112,26 @@ class AgentsCompletionStreamRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None @@ -140,6 +147,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -150,7 +158,14 @@ def serialize_model(self, handler): "parallel_tool_calls", "prompt_mode", ] - nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"] + nullable_fields = [ + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentupdaterequest.py b/src/mistralai/models/agentupdaterequest.py index f6fcb27a..e496907c 100644 --- a/src/mistralai/models/agentupdaterequest.py +++ b/src/mistralai/models/agentupdaterequest.py @@ -11,7 +11,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -52,6 +52,8 @@ class AgentUpdateRequestTypedDict(TypedDict): name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] + deployment_chat: NotRequired[Nullable[bool]] + metadata: NotRequired[Nullable[Dict[str, Any]]] class AgentUpdateRequest(BaseModel): @@ -72,6 +74,10 @@ class AgentUpdateRequest(BaseModel): handoffs: 
OptionalNullable[List[str]] = UNSET + deployment_chat: OptionalNullable[bool] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -82,8 +88,18 @@ def serialize_model(self, handler): "name", "description", "handoffs", + "deployment_chat", + "metadata", + ] + nullable_fields = [ + "instructions", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", ] - nullable_fields = ["instructions", "model", "name", "description", "handoffs"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/audiotranscriptionrequest.py b/src/mistralai/models/audiotranscriptionrequest.py index 371d3ecc..308e2599 100644 --- a/src/mistralai/models/audiotranscriptionrequest.py +++ b/src/mistralai/models/audiotranscriptionrequest.py @@ -14,6 +14,7 @@ class AudioTranscriptionRequestTypedDict(TypedDict): model: str + r"""ID of the model to be used.""" file: NotRequired[FileTypedDict] file_url: NotRequired[Nullable[str]] r"""Url of a file to be transcribed""" @@ -29,6 +30,7 @@ class AudioTranscriptionRequestTypedDict(TypedDict): class AudioTranscriptionRequest(BaseModel): model: Annotated[str, FieldMetadata(multipart=True)] + r"""ID of the model to be used.""" file: Annotated[ Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py index aa0bb5be..475ba863 100644 --- a/src/mistralai/models/batchjobin.py +++ b/src/mistralai/models/batchjobin.py @@ -12,25 +12,35 @@ class BatchJobInTypedDict(TypedDict): input_files: List[str] + r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" endpoint: APIEndpoint model: NotRequired[Nullable[str]] + r"""The model to be used for batch inference.""" agent_id: NotRequired[Nullable[str]] + r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" metadata: NotRequired[Nullable[Dict[str, str]]] + r"""The metadata of your choice to be associated with the batch inference job.""" timeout_hours: NotRequired[int] + r"""The timeout in hours for the batch inference job.""" class BatchJobIn(BaseModel): input_files: List[str] + r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" endpoint: Annotated[APIEndpoint, PlainValidator(validate_open_enum(False))] model: OptionalNullable[str] = UNSET + r"""The model to be used for batch inference.""" agent_id: OptionalNullable[str] = UNSET + r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" metadata: OptionalNullable[Dict[str, str]] = UNSET + r"""The metadata of your choice to be associated with the batch inference job.""" timeout_hours: Optional[int] = 24 + r"""The timeout in hours for the batch inference job.""" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index 6f195f13..a309421b 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -15,7 +15,7 @@ from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -53,11 +53,13 @@ "ChatCompletionRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionRequestToolChoice = TypeAliasType( "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionRequestTypedDict(TypedDict): @@ -77,17 +79,23 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. 
Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] @@ -119,24 +127,31 @@ class ChatCompletionRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) @@ -155,6 +170,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -170,6 +186,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "tools", "n", "prompt_mode", diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 0fa102e5..7a28cf01 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -15,7 +15,7 @@ from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -57,11 +57,13 @@ "ChatCompletionStreamRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionStreamRequestToolChoice = TypeAliasType( "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionStreamRequestTypedDict(TypedDict): @@ -80,17 +82,23 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. 
Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] @@ -121,24 +129,31 @@ class ChatCompletionStreamRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) @@ -157,6 +172,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -172,6 +188,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "tools", "n", "prompt_mode", diff --git a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py index 0fcca512..bd4368d2 100644 --- a/src/mistralai/models/conversationrequest.py +++ b/src/mistralai/models/conversationrequest.py @@ -12,7 +12,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -50,11 +50,14 @@ class ConversationRequestTypedDict(TypedDict): store: NotRequired[Nullable[bool]] handoff_execution: NotRequired[Nullable[HandoffExecution]] instructions: NotRequired[Nullable[str]] - tools: NotRequired[Nullable[List[ToolsTypedDict]]] + tools: NotRequired[List[ToolsTypedDict]] + r"""List of tools which are 
available to the model during the conversation.""" completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[int]] model: NotRequired[Nullable[str]] @@ -69,7 +72,8 @@ class ConversationRequest(BaseModel): instructions: OptionalNullable[str] = UNSET - tools: OptionalNullable[List[Tools]] = UNSET + tools: Optional[List[Tools]] = None + r"""List of tools which are available to the model during the conversation.""" completion_args: OptionalNullable[CompletionArgs] = UNSET @@ -77,8 +81,12 @@ class ConversationRequest(BaseModel): description: OptionalNullable[str] = UNSET + metadata: OptionalNullable[Dict[str, Any]] = UNSET + agent_id: OptionalNullable[str] = UNSET + agent_version: OptionalNullable[int] = UNSET + model: OptionalNullable[str] = UNSET @model_serializer(mode="wrap") @@ -92,18 +100,21 @@ def serialize_model(self, handler): "completion_args", "name", "description", + "metadata", "agent_id", + "agent_version", "model", ] nullable_fields = [ "store", "handoff_execution", "instructions", - "tools", "completion_args", "name", "description", + "metadata", "agent_id", + "agent_version", "model", ] null_default_fields = [] diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py index 58376140..091917fe 100644 --- a/src/mistralai/models/conversationrestartrequest.py +++ b/src/mistralai/models/conversationrestartrequest.py @@ -3,8 +3,9 @@ from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import 
model_serializer +from typing import Any, Dict, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -22,6 +23,10 @@ class ConversationRestartRequestTypedDict(TypedDict): handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + agent_version: NotRequired[Nullable[int]] + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" class ConversationRestartRequest(BaseModel): @@ -40,3 +45,46 @@ class ConversationRestartRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[int] = UNSET + r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + nullable_fields = ["metadata", "agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py index f213aea3..4bcf255a 100644 --- a/src/mistralai/models/conversationrestartstreamrequest.py +++ b/src/mistralai/models/conversationrestartstreamrequest.py @@ -3,8 +3,9 @@ from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -22,6 +23,10 @@ class ConversationRestartStreamRequestTypedDict(TypedDict): handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom 
metadata for the conversation.""" + agent_version: NotRequired[Nullable[int]] + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" class ConversationRestartStreamRequest(BaseModel): @@ -42,3 +47,46 @@ class ConversationRestartStreamRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[int] = UNSET + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + nullable_fields = ["metadata", "agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py index 0880727e..8c6d56c2 100644 --- a/src/mistralai/models/conversationstreamrequest.py +++ b/src/mistralai/models/conversationstreamrequest.py @@ -12,7 +12,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, 
Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -50,11 +50,14 @@ class ConversationStreamRequestTypedDict(TypedDict): store: NotRequired[Nullable[bool]] handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] instructions: NotRequired[Nullable[str]] - tools: NotRequired[Nullable[List[ConversationStreamRequestToolsTypedDict]]] + tools: NotRequired[List[ConversationStreamRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[int]] model: NotRequired[Nullable[str]] @@ -71,7 +74,8 @@ class ConversationStreamRequest(BaseModel): instructions: OptionalNullable[str] = UNSET - tools: OptionalNullable[List[ConversationStreamRequestTools]] = UNSET + tools: Optional[List[ConversationStreamRequestTools]] = None + r"""List of tools which are available to the model during the conversation.""" completion_args: OptionalNullable[CompletionArgs] = UNSET @@ -79,8 +83,12 @@ class ConversationStreamRequest(BaseModel): description: OptionalNullable[str] = UNSET + metadata: OptionalNullable[Dict[str, Any]] = UNSET + agent_id: OptionalNullable[str] = UNSET + agent_version: OptionalNullable[int] = UNSET + model: OptionalNullable[str] = UNSET @model_serializer(mode="wrap") @@ -94,18 +102,21 @@ def serialize_model(self, handler): "completion_args", "name", "description", + "metadata", "agent_id", + "agent_version", "model", ] nullable_fields = [ "store", "handoff_execution", "instructions", - "tools", "completion_args", "name", "description", + "metadata", "agent_id", + "agent_version", "model", ] null_default_fields = [] diff --git 
a/src/mistralai/models/documentout.py b/src/mistralai/models/documentout.py index 65f1be80..81d9605f 100644 --- a/src/mistralai/models/documentout.py +++ b/src/mistralai/models/documentout.py @@ -4,20 +4,21 @@ from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer +from typing import Any, Dict from typing_extensions import NotRequired, TypedDict class DocumentOutTypedDict(TypedDict): id: str library_id: str - hash: str - mime_type: str - extension: str - size: int + hash: Nullable[str] + mime_type: Nullable[str] + extension: Nullable[str] + size: Nullable[int] name: str created_at: datetime processing_status: str - uploaded_by_id: str + uploaded_by_id: Nullable[str] uploaded_by_type: str tokens_processing_total: int summary: NotRequired[Nullable[str]] @@ -25,6 +26,8 @@ class DocumentOutTypedDict(TypedDict): number_of_pages: NotRequired[Nullable[int]] tokens_processing_main_content: NotRequired[Nullable[int]] tokens_processing_summary: NotRequired[Nullable[int]] + url: NotRequired[Nullable[str]] + attributes: NotRequired[Nullable[Dict[str, Any]]] class DocumentOut(BaseModel): @@ -32,13 +35,13 @@ class DocumentOut(BaseModel): library_id: str - hash: str + hash: Nullable[str] - mime_type: str + mime_type: Nullable[str] - extension: str + extension: Nullable[str] - size: int + size: Nullable[int] name: str @@ -46,7 +49,7 @@ class DocumentOut(BaseModel): processing_status: str - uploaded_by_id: str + uploaded_by_id: Nullable[str] uploaded_by_type: str @@ -62,6 +65,10 @@ class DocumentOut(BaseModel): tokens_processing_summary: OptionalNullable[int] = UNSET + url: OptionalNullable[str] = UNSET + + attributes: OptionalNullable[Dict[str, Any]] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -70,13 +77,22 @@ def serialize_model(self, handler): "number_of_pages", "tokens_processing_main_content", "tokens_processing_summary", 
+ "url", + "attributes", ] nullable_fields = [ + "hash", + "mime_type", + "extension", + "size", "summary", "last_processed_at", "number_of_pages", + "uploaded_by_id", "tokens_processing_main_content", "tokens_processing_summary", + "url", + "attributes", ] null_default_fields = [] diff --git a/src/mistralai/models/documentupdatein.py b/src/mistralai/models/documentupdatein.py index 0f6abd5b..bd89ff47 100644 --- a/src/mistralai/models/documentupdatein.py +++ b/src/mistralai/models/documentupdatein.py @@ -1,22 +1,43 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict +from typing import Dict, List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +AttributesTypedDict = TypeAliasType( + "AttributesTypedDict", + Union[ + bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] + ], +) + + +Attributes = TypeAliasType( + "Attributes", + Union[ + bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] + ], +) class DocumentUpdateInTypedDict(TypedDict): name: NotRequired[Nullable[str]] + attributes: NotRequired[Nullable[Dict[str, AttributesTypedDict]]] class DocumentUpdateIn(BaseModel): name: OptionalNullable[str] = UNSET + attributes: OptionalNullable[Dict[str, Attributes]] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name"] - nullable_fields = ["name"] + optional_fields = ["name", "attributes"] + nullable_fields = ["name", "attributes"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 685f27fd..4af890a3 100644 --- 
a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -13,33 +13,33 @@ EmbeddingRequestInputsTypedDict = TypeAliasType( "EmbeddingRequestInputsTypedDict", Union[str, List[str]] ) -r"""Text to embed.""" +r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) -r"""Text to embed.""" +r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" class EmbeddingRequestTypedDict(TypedDict): model: str - r"""ID of the model to use.""" + r"""The ID of the model to be used for embedding.""" inputs: EmbeddingRequestInputsTypedDict - r"""Text to embed.""" + r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" output_dimension: NotRequired[Nullable[int]] - r"""The dimension of the output embeddings.""" + r"""The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used.""" output_dtype: NotRequired[EmbeddingDtype] encoding_format: NotRequired[EncodingFormat] class EmbeddingRequest(BaseModel): model: str - r"""ID of the model to use.""" + r"""The ID of the model to be used for embedding.""" inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] - r"""Text to embed.""" + r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" output_dimension: OptionalNullable[int] = UNSET - r"""The dimension of the output embeddings.""" + r"""The dimension of the output embeddings when feature available. 
If not provided, a default output dimension will be used.""" output_dtype: Optional[EmbeddingDtype] = None diff --git a/src/mistralai/models/files_api_routes_list_filesop.py b/src/mistralai/models/files_api_routes_list_filesop.py index 5060c3b8..8e174a58 100644 --- a/src/mistralai/models/files_api_routes_list_filesop.py +++ b/src/mistralai/models/files_api_routes_list_filesop.py @@ -15,6 +15,7 @@ class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] + include_total: NotRequired[bool] sample_type: NotRequired[Nullable[List[SampleType]]] source: NotRequired[Nullable[List[Source]]] search: NotRequired[Nullable[str]] @@ -32,6 +33,11 @@ class FilesAPIRoutesListFilesRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 100 + include_total: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = True + sample_type: Annotated[ OptionalNullable[ List[Annotated[SampleType, PlainValidator(validate_open_enum(False))]] @@ -63,6 +69,7 @@ def serialize_model(self, handler): optional_fields = [ "page", "page_size", + "include_total", "sample_type", "source", "search", diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py index 06210139..801a358b 100644 --- a/src/mistralai/models/fimcompletionrequest.py +++ b/src/mistralai/models/fimcompletionrequest.py @@ -3,7 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict @@ -21,10 +21,7 @@ class FIMCompletionRequestTypedDict(TypedDict): model: str - r"""ID of the model to use. 
Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" temperature: NotRequired[Nullable[float]] @@ -39,6 +36,7 @@ class FIMCompletionRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" min_tokens: NotRequired[Nullable[int]] @@ -47,10 +45,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" @@ -73,6 +68,8 @@ class FIMCompletionRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" @@ -88,6 +85,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "suffix", "min_tokens", ] @@ -95,6 +93,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "suffix", "min_tokens", ] diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py index 05cc345b..2e8e6db2 100644 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -3,7 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict @@ -21,10 +21,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" temperature: NotRequired[Nullable[float]] @@ -38,6 +35,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" min_tokens: NotRequired[Nullable[int]] @@ -46,10 +44,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" @@ -71,6 +66,8 @@ class FIMCompletionStreamRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" @@ -86,6 +83,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "suffix", "min_tokens", ] @@ -93,6 +91,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "suffix", "min_tokens", ] diff --git a/src/mistralai/models/libraries_documents_list_v1op.py b/src/mistralai/models/libraries_documents_list_v1op.py index 04a3ed25..e6ff29cf 100644 --- a/src/mistralai/models/libraries_documents_list_v1op.py +++ b/src/mistralai/models/libraries_documents_list_v1op.py @@ -13,6 +13,7 @@ class LibrariesDocumentsListV1RequestTypedDict(TypedDict): search: NotRequired[Nullable[str]] page_size: NotRequired[int] page: NotRequired[int] + filters_attributes: NotRequired[Nullable[str]] sort_by: NotRequired[str] sort_order: NotRequired[str] @@ -37,6 +38,11 @@ class LibrariesDocumentsListV1Request(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 0 + filters_attributes: Annotated[ + 
OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + sort_by: Annotated[ Optional[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -49,8 +55,15 @@ class LibrariesDocumentsListV1Request(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["search", "page_size", "page", "sort_by", "sort_order"] - nullable_fields = ["search"] + optional_fields = [ + "search", + "page_size", + "page", + "filters_attributes", + "sort_by", + "sort_order", + ] + nullable_fields = ["search", "filters_attributes"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/libraryout.py b/src/mistralai/models/libraryout.py index 6a13130d..d3bc36f9 100644 --- a/src/mistralai/models/libraryout.py +++ b/src/mistralai/models/libraryout.py @@ -12,18 +12,19 @@ class LibraryOutTypedDict(TypedDict): name: str created_at: datetime updated_at: datetime - owner_id: str + owner_id: Nullable[str] owner_type: str total_size: int nb_documents: int chunk_size: Nullable[int] emoji: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] - generated_name: NotRequired[Nullable[str]] generated_description: NotRequired[Nullable[str]] explicit_user_members_count: NotRequired[Nullable[int]] explicit_workspace_members_count: NotRequired[Nullable[int]] org_sharing_role: NotRequired[Nullable[str]] + generated_name: NotRequired[Nullable[str]] + r"""Generated Name""" class LibraryOut(BaseModel): @@ -35,7 +36,7 @@ class LibraryOut(BaseModel): updated_at: datetime - owner_id: str + owner_id: Nullable[str] owner_type: str @@ -49,8 +50,6 @@ class LibraryOut(BaseModel): description: OptionalNullable[str] = UNSET - generated_name: OptionalNullable[str] = UNSET - generated_description: OptionalNullable[str] = UNSET explicit_user_members_count: OptionalNullable[int] = UNSET @@ -59,26 +58,30 @@ class LibraryOut(BaseModel): org_sharing_role: OptionalNullable[str] 
= UNSET + generated_name: OptionalNullable[str] = UNSET + r"""Generated Name""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "emoji", "description", - "generated_name", "generated_description", "explicit_user_members_count", "explicit_workspace_members_count", "org_sharing_role", + "generated_name", ] nullable_fields = [ + "owner_id", "chunk_size", "emoji", "description", - "generated_name", "generated_description", "explicit_user_members_count", "explicit_workspace_members_count", "org_sharing_role", + "generated_name", ] null_default_fields = [] diff --git a/src/mistralai/models/listfilesout.py b/src/mistralai/models/listfilesout.py index b032f632..2f82b37d 100644 --- a/src/mistralai/models/listfilesout.py +++ b/src/mistralai/models/listfilesout.py @@ -2,15 +2,16 @@ from __future__ import annotations from .fileschema import FileSchema, FileSchemaTypedDict -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer from typing import List -from typing_extensions import TypedDict +from typing_extensions import NotRequired, TypedDict class ListFilesOutTypedDict(TypedDict): data: List[FileSchemaTypedDict] object: str - total: int + total: NotRequired[Nullable[int]] class ListFilesOut(BaseModel): @@ -18,4 +19,34 @@ class ListFilesOut(BaseModel): object: str - total: int + total: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["total"] + nullable_fields = ["total"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + 
if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py index 54c5f2a2..4b5d5da7 100644 --- a/src/mistralai/models/modelcapabilities.py +++ b/src/mistralai/models/modelcapabilities.py @@ -8,22 +8,31 @@ class ModelCapabilitiesTypedDict(TypedDict): completion_chat: NotRequired[bool] - completion_fim: NotRequired[bool] function_calling: NotRequired[bool] + completion_fim: NotRequired[bool] fine_tuning: NotRequired[bool] vision: NotRequired[bool] + ocr: NotRequired[bool] classification: NotRequired[bool] + moderation: NotRequired[bool] + audio: NotRequired[bool] class ModelCapabilities(BaseModel): - completion_chat: Optional[bool] = True + completion_chat: Optional[bool] = False - completion_fim: Optional[bool] = False + function_calling: Optional[bool] = False - function_calling: Optional[bool] = True + completion_fim: Optional[bool] = False fine_tuning: Optional[bool] = False vision: Optional[bool] = False + ocr: Optional[bool] = False + classification: Optional[bool] = False + + moderation: Optional[bool] = False + + audio: Optional[bool] = False diff --git a/src/mistralai/models/modelconversation.py b/src/mistralai/models/modelconversation.py index 4ced79ea..e413b6fb 100644 --- a/src/mistralai/models/modelconversation.py +++ b/src/mistralai/models/modelconversation.py @@ -12,7 +12,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -60,6 +60,8 @@ class ModelConversationTypedDict(TypedDict): r"""Name given 
to the conversation.""" description: NotRequired[Nullable[str]] r"""Description of the what the conversation is about.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" object: NotRequired[ModelConversationObject] @@ -87,6 +89,9 @@ class ModelConversation(BaseModel): description: OptionalNullable[str] = UNSET r"""Description of the what the conversation is about.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + object: Optional[ModelConversationObject] = "conversation" @model_serializer(mode="wrap") @@ -97,9 +102,10 @@ def serialize_model(self, handler): "completion_args", "name", "description", + "metadata", "object", ] - nullable_fields = ["instructions", "name", "description"] + nullable_fields = ["instructions", "name", "description", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/ocrpageobject.py b/src/mistralai/models/ocrpageobject.py index 94624a16..737defba 100644 --- a/src/mistralai/models/ocrpageobject.py +++ b/src/mistralai/models/ocrpageobject.py @@ -3,10 +3,11 @@ from __future__ import annotations from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List -from typing_extensions import TypedDict +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class OCRPageObjectTypedDict(TypedDict): @@ -18,6 +19,14 @@ class OCRPageObjectTypedDict(TypedDict): r"""List of all extracted images in the page""" dimensions: Nullable[OCRPageDimensionsTypedDict] r"""The dimensions of the PDF Page's screenshot image""" 
+ tables: NotRequired[List[OCRTableObjectTypedDict]] + r"""List of all extracted tables in the page""" + hyperlinks: NotRequired[List[str]] + r"""List of all hyperlinks in the page""" + header: NotRequired[Nullable[str]] + r"""Header of the page""" + footer: NotRequired[Nullable[str]] + r"""Footer of the page""" class OCRPageObject(BaseModel): @@ -33,10 +42,22 @@ class OCRPageObject(BaseModel): dimensions: Nullable[OCRPageDimensions] r"""The dimensions of the PDF Page's screenshot image""" + tables: Optional[List[OCRTableObject]] = None + r"""List of all extracted tables in the page""" + + hyperlinks: Optional[List[str]] = None + r"""List of all hyperlinks in the page""" + + header: OptionalNullable[str] = UNSET + r"""Header of the page""" + + footer: OptionalNullable[str] = UNSET + r"""Footer of the page""" + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["dimensions"] + optional_fields = ["tables", "hyperlinks", "header", "footer"] + nullable_fields = ["header", "footer", "dimensions"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py index df932c2a..e600d5b6 100644 --- a/src/mistralai/models/ocrrequest.py +++ b/src/mistralai/models/ocrrequest.py @@ -7,7 +7,7 @@ from .responseformat import ResponseFormat, ResponseFormatTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, Union +from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict @@ -22,6 +22,9 @@ r"""Document to run OCR on""" +TableFormat = Literal["markdown", "html"] + + class OCRRequestTypedDict(TypedDict): model: Nullable[str] document: DocumentTypedDict @@ -39,6 +42,9 @@ class OCRRequestTypedDict(TypedDict): r"""Structured output class for extracting useful information from each 
extracted bounding box / image from document. Only json_schema is valid for this field""" document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + table_format: NotRequired[Nullable[TableFormat]] + extract_header: NotRequired[bool] + extract_footer: NotRequired[bool] class OCRRequest(BaseModel): @@ -67,6 +73,12 @@ class OCRRequest(BaseModel): document_annotation_format: OptionalNullable[ResponseFormat] = UNSET r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + table_format: OptionalNullable[TableFormat] = UNSET + + extract_header: Optional[bool] = None + + extract_footer: Optional[bool] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -77,6 +89,9 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "table_format", + "extract_header", + "extract_footer", ] nullable_fields = [ "model", @@ -86,6 +101,7 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "table_format", ] null_default_fields = [] diff --git a/src/mistralai/models/ocrtableobject.py b/src/mistralai/models/ocrtableobject.py new file mode 100644 index 00000000..76f21f3b --- /dev/null +++ b/src/mistralai/models/ocrtableobject.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +Format = Literal["markdown", "html"] +r"""Format of the table""" + + +class OCRTableObjectTypedDict(TypedDict): + id: str + r"""Table ID for extracted table in a page""" + content: str + r"""Content of the table in the given format""" + format_: Format + r"""Format of the table""" + + +class OCRTableObject(BaseModel): + id: str + r"""Table ID for extracted table in a page""" + + content: str + r"""Content of the table in the given format""" + + format_: Annotated[Format, pydantic.Field(alias="format")] + r"""Format of the table""" diff --git a/src/mistralai/models/prediction.py b/src/mistralai/models/prediction.py index 7937c9d1..582d8789 100644 --- a/src/mistralai/models/prediction.py +++ b/src/mistralai/models/prediction.py @@ -10,11 +10,15 @@ class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + type: Literal["content"] content: NotRequired[str] class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + TYPE: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) diff --git a/src/mistralai/models/requestsource.py b/src/mistralai/models/requestsource.py new file mode 100644 index 00000000..5ab93af0 --- /dev/null +++ b/src/mistralai/models/requestsource.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +RequestSource = Literal["api", "playground", "agent_builder_v1"] diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py index c9319989..92284017 100644 --- a/src/mistralai/models/responseformat.py +++ b/src/mistralai/models/responseformat.py @@ -10,14 +10,16 @@ class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: NotRequired[ResponseFormats] - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: Optional[ResponseFormats] = None - r"""An object specifying the format that the model must output. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: OptionalNullable[JSONSchema] = UNSET diff --git a/src/mistralai/models/responseformats.py b/src/mistralai/models/responseformats.py index 08c39951..258fe70e 100644 --- a/src/mistralai/models/responseformats.py +++ b/src/mistralai/models/responseformats.py @@ -5,4 +5,3 @@ ResponseFormats = Literal["text", "json_object", "json_schema"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/src/mistralai/models/sharingdelete.py b/src/mistralai/models/sharingdelete.py index b9df5f9d..d1cd7074 100644 --- a/src/mistralai/models/sharingdelete.py +++ b/src/mistralai/models/sharingdelete.py @@ -2,25 +2,56 @@ from __future__ import annotations from .entitytype import EntityType -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum +from pydantic import model_serializer from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class SharingDeleteTypedDict(TypedDict): - org_id: str share_with_uuid: str r"""The id of the entity (user, workspace or organization) to share with""" share_with_type: EntityType r"""The type of entity, used to share a library.""" + org_id: NotRequired[Nullable[str]] class SharingDelete(BaseModel): - org_id: str - share_with_uuid: str r"""The id of the entity (user, workspace or organization) to share with""" 
share_with_type: Annotated[EntityType, PlainValidator(validate_open_enum(False))] r"""The type of entity, used to share a library.""" + + org_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["org_id"] + nullable_fields = ["org_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/sharingin.py b/src/mistralai/models/sharingin.py index af20fd14..d3ada343 100644 --- a/src/mistralai/models/sharingin.py +++ b/src/mistralai/models/sharingin.py @@ -3,24 +3,23 @@ from __future__ import annotations from .entitytype import EntityType from .shareenum import ShareEnum -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum +from pydantic import model_serializer from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class SharingInTypedDict(TypedDict): - org_id: str level: ShareEnum share_with_uuid: str r"""The id of the entity (user, workspace or organization) to share with""" share_with_type: EntityType r"""The type of entity, used to share a library.""" + org_id: NotRequired[Nullable[str]] class SharingIn(BaseModel): - org_id: str - level: Annotated[ShareEnum, PlainValidator(validate_open_enum(False))] 
share_with_uuid: str @@ -28,3 +27,35 @@ class SharingIn(BaseModel): share_with_type: Annotated[EntityType, PlainValidator(validate_open_enum(False))] r"""The type of entity, used to share a library.""" + + org_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["org_id"] + nullable_fields = ["org_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/sharingout.py b/src/mistralai/models/sharingout.py index a78a7764..12455818 100644 --- a/src/mistralai/models/sharingout.py +++ b/src/mistralai/models/sharingout.py @@ -11,7 +11,7 @@ class SharingOutTypedDict(TypedDict): org_id: str role: str share_with_type: str - share_with_uuid: str + share_with_uuid: Nullable[str] user_id: NotRequired[Nullable[str]] @@ -24,14 +24,14 @@ class SharingOut(BaseModel): share_with_type: str - share_with_uuid: str + share_with_uuid: Nullable[str] user_id: OptionalNullable[str] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["user_id"] - nullable_fields = ["user_id"] + nullable_fields = ["user_id", "share_with_uuid"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/toolexecutiondeltaevent.py b/src/mistralai/models/toolexecutiondeltaevent.py index 99b97e68..25438206 100644 --- a/src/mistralai/models/toolexecutiondeltaevent.py +++ b/src/mistralai/models/toolexecutiondeltaevent.py @@ -4,16 
+4,25 @@ from .builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ToolExecutionDeltaEventType = Literal["tool.execution.delta"] +ToolExecutionDeltaEventNameTypedDict = TypeAliasType( + "ToolExecutionDeltaEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionDeltaEventName = TypeAliasType( + "ToolExecutionDeltaEventName", Union[BuiltInConnectors, str] +) + class ToolExecutionDeltaEventTypedDict(TypedDict): id: str - name: BuiltInConnectors + name: ToolExecutionDeltaEventNameTypedDict arguments: str type: NotRequired[ToolExecutionDeltaEventType] created_at: NotRequired[datetime] @@ -23,7 +32,7 @@ class ToolExecutionDeltaEventTypedDict(TypedDict): class ToolExecutionDeltaEvent(BaseModel): id: str - name: BuiltInConnectors + name: ToolExecutionDeltaEventName arguments: str diff --git a/src/mistralai/models/toolexecutiondoneevent.py b/src/mistralai/models/toolexecutiondoneevent.py index c73d943a..2dea3324 100644 --- a/src/mistralai/models/toolexecutiondoneevent.py +++ b/src/mistralai/models/toolexecutiondoneevent.py @@ -4,16 +4,25 @@ from .builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.types import BaseModel -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ToolExecutionDoneEventType = Literal["tool.execution.done"] +ToolExecutionDoneEventNameTypedDict = TypeAliasType( + "ToolExecutionDoneEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionDoneEventName = TypeAliasType( + "ToolExecutionDoneEventName", Union[BuiltInConnectors, str] +) + class 
ToolExecutionDoneEventTypedDict(TypedDict): id: str - name: BuiltInConnectors + name: ToolExecutionDoneEventNameTypedDict type: NotRequired[ToolExecutionDoneEventType] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -23,7 +32,7 @@ class ToolExecutionDoneEventTypedDict(TypedDict): class ToolExecutionDoneEvent(BaseModel): id: str - name: BuiltInConnectors + name: ToolExecutionDoneEventName type: Optional[ToolExecutionDoneEventType] = "tool.execution.done" diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py index db503ea8..abe53e06 100644 --- a/src/mistralai/models/toolexecutionentry.py +++ b/src/mistralai/models/toolexecutionentry.py @@ -5,17 +5,22 @@ from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ToolExecutionEntryObject = Literal["entry"] ToolExecutionEntryType = Literal["tool.execution"] +NameTypedDict = TypeAliasType("NameTypedDict", Union[BuiltInConnectors, str]) + + +Name = TypeAliasType("Name", Union[BuiltInConnectors, str]) + class ToolExecutionEntryTypedDict(TypedDict): - name: BuiltInConnectors + name: NameTypedDict arguments: str object: NotRequired[ToolExecutionEntryObject] type: NotRequired[ToolExecutionEntryType] @@ -26,7 +31,7 @@ class ToolExecutionEntryTypedDict(TypedDict): class ToolExecutionEntry(BaseModel): - name: BuiltInConnectors + name: Name arguments: str diff --git a/src/mistralai/models/toolexecutionstartedevent.py b/src/mistralai/models/toolexecutionstartedevent.py index 7a54058f..cf4ecbfc 100644 --- a/src/mistralai/models/toolexecutionstartedevent.py +++ b/src/mistralai/models/toolexecutionstartedevent.py @@ -4,16 +4,25 @@ from 
.builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ToolExecutionStartedEventType = Literal["tool.execution.started"] +ToolExecutionStartedEventNameTypedDict = TypeAliasType( + "ToolExecutionStartedEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionStartedEventName = TypeAliasType( + "ToolExecutionStartedEventName", Union[BuiltInConnectors, str] +) + class ToolExecutionStartedEventTypedDict(TypedDict): id: str - name: BuiltInConnectors + name: ToolExecutionStartedEventNameTypedDict arguments: str type: NotRequired[ToolExecutionStartedEventType] created_at: NotRequired[datetime] @@ -23,7 +32,7 @@ class ToolExecutionStartedEventTypedDict(TypedDict): class ToolExecutionStartedEvent(BaseModel): id: str - name: BuiltInConnectors + name: ToolExecutionStartedEventName arguments: str diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index b6cc3186..bf82cc16 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -73,18 +73,12 @@ def list( ), ), request=req, - error_status_codes=["422", "4XX", "5XX"], + error_status_codes=["4XX", "5XX"], retry_config=retry_config, ) - response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response(models.ModelList, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -155,18 +149,12 @@ async def list_async( ), ), request=req, - 
error_status_codes=["422", "4XX", "5XX"], + error_status_codes=["4XX", "5XX"], retry_config=retry_config, ) - response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response(models.ModelList, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index bed8b7be..6b283b35 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -28,6 +28,9 @@ def process( document_annotation_format: OptionalNullable[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -44,6 +47,9 @@ def process( :param image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field + :param table_format: + :param extract_header: + :param extract_footer: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -73,6 +79,9 @@ def process( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, ) req = self._build_request( @@ -150,6 +159,9 @@ async def process_async( document_annotation_format: OptionalNullable[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -166,6 +178,9 @@ async def process_async( :param image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field + :param table_format: + :param extract_header: + :param extract_footer: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -195,6 +210,9 @@ async def process_async( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, ) req = self._build_request_async( diff --git a/src/mistralai/transcriptions.py b/src/mistralai/transcriptions.py index 3e2de6f5..dc8ad2e8 100644 --- a/src/mistralai/transcriptions.py +++ b/src/mistralai/transcriptions.py @@ -29,7 +29,7 @@ def complete( ) -> models.TranscriptionResponse: r"""Create Transcription - :param model: + :param model: ID of the model to be used. :param file: :param file_url: Url of a file to be transcribed :param file_id: ID of a file uploaded to /v1/files @@ -131,7 +131,7 @@ async def complete_async( ) -> models.TranscriptionResponse: r"""Create Transcription - :param model: + :param model: ID of the model to be used. :param file: :param file_url: Url of a file to be transcribed :param file_id: ID of a file uploaded to /v1/files @@ -231,7 +231,7 @@ def stream( timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> eventstreaming.EventStream[models.TranscriptionStreamEvents]: - r"""Create streaming transcription (SSE) + r"""Create Streaming Transcription (SSE) :param model: :param file: @@ -343,7 +343,7 @@ async def stream_async( timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]: - r"""Create streaming transcription (SSE) + r"""Create Streaming Transcription (SSE) :param model: :param file: